Commit f49d95f

Merge pull request #49 from rouson/ford-commits-only
Replace PR #48: FORD commits only
2 parents 9e269ba + 59092ed commit f49d95f

9 files changed (+127, -90 lines)

.gitignore (+1)

@@ -2,3 +2,4 @@
 *.mod
 build
 data/*/*.dat
+doc

README.md (+13)

@@ -15,6 +15,7 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
 - [Training the network](https://github.com/modern-fortran/neural-fortran#training-the-network)
 - [Saving and loading from file](https://github.com/modern-fortran/neural-fortran#saving-and-loading-from-file)
 - [MNIST training example](https://github.com/modern-fortran/neural-fortran#mnist-training-example)
+* [API documentation](https://github.com/modern-fortran/neural-fortran#api-documentation)
 * [Contributing](https://github.com/modern-fortran/neural-fortran#contributing)
 * [Contributors](https://github.com/modern-fortran/neural-fortran#contributors)
 * [Related projects](https://github.com/modern-fortran/neural-fortran#related-projects)
@@ -369,6 +370,18 @@ for example on 16 cores using [OpenCoarrays](https://github.com/sourceryinstitut
 $ cafrun -n 16 ./example_mnist
 ```
 
+## API documentation
+
+API documentation can be generated with [FORD](https://github.com/Fortran-FOSS-Programmers/ford/).
+Assuming you have FORD installed on your system, run
+
+```
+ford ford.md
+```
+
+from the neural-fortran top-level directory to generate the API documentation in doc/html.
+Point your browser to doc/html/index.html to read it.
+
 ## Contributing
 
 neural-fortran is currently a proof-of-concept with potential for
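The section added to the README above assumes FORD is already installed. FORD is distributed on PyPI, so in a working Python environment `pip install ford` is a common way to get it before running `ford ford.md` from the repository root.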

ford.md (+23)

@@ -0,0 +1,23 @@
+project:
+summary: A parallel neural net microframework
+src_dir: src
+output_dir: doc/html
+preprocess: true
+display: public
+         protected
+         private
+source: true
+graph: true
+md_extensions: markdown.extensions.toc
+coloured_edges: true
+sort: permission-alpha
+extra_mods: iso_fortran_env:https://gcc.gnu.org/onlinedocs/gfortran/ISO_005fFORTRAN_005fENV.html
+            iso_c_binding:https://gcc.gnu.org/onlinedocs/gfortran/ISO_005fC_005fBINDING.html#ISO_005fC_005fBINDING
+author: Milan Curcic
+print_creation_date: true
+creation_date: %Y-%m-%d %H:%M %z
+project_github: https://github.com/modern-fortran/neural-fortran
+project_download: https://github.com/modern-fortran/neural-fortran/releases
+github: https://github.com/modern-fortran
+
+{!README.md!}
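A few notes on this new FORD project file: `display: public protected private` tells FORD to document entities of all three access levels; `extra_mods` points references to the intrinsic `iso_fortran_env` and `iso_c_binding` modules at the gfortran documentation; and the trailing `{!README.md!}` uses markdown-include syntax (supported by FORD) to embed the project README as the front page of the generated site.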

src/mod_activation.f90 (+13, -13)

@@ -1,6 +1,6 @@
 module mod_activation
 
-  ! A collection of activation functions and their derivatives.
+  !! A collection of activation functions and their derivatives.
 
   use mod_kinds, only: ik, rk
 
@@ -26,14 +26,14 @@ end function activation_function
 contains
 
   pure function gaussian(x) result(res)
-    ! Gaussian activation function.
+    !! Gaussian activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = exp(-x**2)
   end function gaussian
 
   pure function gaussian_prime(x) result(res)
-    ! First derivative of the Gaussian activation function.
+    !! First derivative of the Gaussian activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = -2 * x * gaussian(x)
@@ -47,7 +47,7 @@ pure function relu(x) result(res)
   end function relu
 
   pure function relu_prime(x) result(res)
-    ! First derivative of the REctified Linear Unit (RELU) activation function.
+    !! First derivative of the REctified Linear Unit (RELU) activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     where (x > 0)
@@ -58,21 +58,21 @@ pure function relu_prime(x) result(res)
   end function relu_prime
 
   pure function sigmoid(x) result(res)
-    ! Sigmoid activation function.
+    !! Sigmoid activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = 1 / (1 + exp(-x))
   end function sigmoid
 
   pure function sigmoid_prime(x) result(res)
-    ! First derivative of the sigmoid activation function.
+    !! First derivative of the sigmoid activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = sigmoid(x) * (1 - sigmoid(x))
   end function sigmoid_prime
 
   pure function step(x) result(res)
-    ! Step activation function.
+    !! Step activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     where (x > 0)
@@ -83,24 +83,24 @@ pure function step(x) result(res)
   end function step
 
   pure function step_prime(x) result(res)
-    ! First derivative of the step activation function.
+    !! First derivative of the step activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = 0
   end function step_prime
 
   pure function tanhf(x) result(res)
-    ! Tangent hyperbolic activation function.
-    ! Same as the intrinsic tanh, but must be
-    ! defined here so that we can use procedure
-    ! pointer with it.
+    !! Tangent hyperbolic activation function.
+    !! Same as the intrinsic tanh, but must be
+    !! defined here so that we can use procedure
+    !! pointer with it.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = tanh(x)
   end function tanhf
 
   pure function tanh_prime(x) result(res)
-    ! First derivative of the tanh activation function.
+    !! First derivative of the tanh activation function.
     real(rk), intent(in) :: x(:)
     real(rk) :: res(size(x))
     res = 1 - tanh(x)**2
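The only change in this file is the comment prefix: with FORD's default settings, comments beginning with `!!` are picked up as documentation for the enclosing entity, while plain `!` comments are ignored. A minimal sketch of the convention, using a hypothetical function in the same style (not part of this commit; `rk` is assumed to come from mod_kinds):

```
module activation_doc_demo
  !! Hypothetical module illustrating FORD docmarks.
  use mod_kinds, only: rk  ! assumed kind module from this repo
  implicit none
contains
  pure function identity(x) result(res)
    !! Identity activation; the `!!` prefix makes this a FORD docstring.
    real(rk), intent(in) :: x(:)
    real(rk) :: res(size(x))
    res = x ! plain `!` comment; FORD ignores it
  end function identity
end module activation_doc_demo
```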

src/mod_layer.f90 (+18, -18)

@@ -1,6 +1,6 @@
 module mod_layer
 
-  ! Defines the layer type and its methods.
+  !! Defines the layer type and its methods.
 
   use mod_activation
   use mod_kinds, only: ik, rk
@@ -12,13 +12,13 @@ module mod_layer
   public :: array1d, array2d, db_init, db_co_sum, dw_init, dw_co_sum, layer_type
 
   type :: layer_type
-    real(rk), allocatable :: a(:) ! activations
-    real(rk), allocatable :: b(:) ! biases
-    real(rk), allocatable :: w(:,:) ! weights
-    real(rk), allocatable :: z(:) ! arg. to activation function
+    real(rk), allocatable :: a(:) !! activations
+    real(rk), allocatable :: b(:) !! biases
+    real(rk), allocatable :: w(:,:) !! weights
+    real(rk), allocatable :: z(:) !! arg. to activation function
     procedure(activation_function), pointer, nopass :: activation => null()
     procedure(activation_function), pointer, nopass :: activation_prime => null()
-    character(len=:), allocatable :: activation_str ! activation character string
+    character(len=:), allocatable :: activation_str !! activation character string
   contains
     procedure, public, pass(self) :: set_activation
   end type layer_type
@@ -46,9 +46,9 @@ module mod_layer
 contains
 
   type(layer_type) function constructor(this_size, next_size) result(layer)
-    ! Layer class constructor. this_size is the number of neurons in the layer.
-    ! next_size is the number of neurons in the next layer, used to allocate
-    ! the weights.
+    !! Layer class constructor. this_size is the number of neurons in the layer.
+    !! next_size is the number of neurons in the next layer, used to allocate
+    !! the weights.
     integer(ik), intent(in) :: this_size, next_size
     allocate(layer % a(this_size))
     allocate(layer % z(this_size))
@@ -59,21 +59,21 @@ type(layer_type) function constructor(this_size, next_size) result(layer)
   end function constructor
 
   pure type(array1d) function array1d_constructor(length) result(a)
-    ! Overloads the default type constructor.
+    !! Overloads the default type constructor.
     integer(ik), intent(in) :: length
     allocate(a % array(length))
     a % array = 0
   end function array1d_constructor
 
   pure type(array2d) function array2d_constructor(dims) result(a)
-    ! Overloads the default type constructor.
+    !! Overloads the default type constructor.
     integer(ik), intent(in) :: dims(2)
     allocate(a % array(dims(1), dims(2)))
     a % array = 0
   end function array2d_constructor
 
   pure subroutine db_init(db, dims)
-    ! Initialises biases structure.
+    !! Initialises biases structure.
     type(array1d), allocatable, intent(in out) :: db(:)
     integer(ik), intent(in) :: dims(:)
     integer(ik) :: n, nm
@@ -86,7 +86,7 @@ pure subroutine db_init(db, dims)
   end subroutine db_init
 
   pure subroutine dw_init(dw, dims)
-    ! Initialises weights structure.
+    !! Initialises weights structure.
     type(array2d), allocatable, intent(in out) :: dw(:)
     integer(ik), intent(in) :: dims(:)
     integer(ik) :: n, nm
@@ -99,7 +99,7 @@ pure subroutine dw_init(dw, dims)
   end subroutine dw_init
 
   subroutine db_co_sum(db)
-    ! Performs a collective sum of bias tendencies.
+    !! Performs a collective sum of bias tendencies.
     type(array1d), allocatable, intent(in out) :: db(:)
     integer(ik) :: n
     do n = 2, size(db)
@@ -110,7 +110,7 @@ subroutine db_co_sum(db)
   end subroutine db_co_sum
 
   subroutine dw_co_sum(dw)
-    ! Performs a collective sum of weights tendencies.
+    !! Performs a collective sum of weights tendencies.
     type(array2d), allocatable, intent(in out) :: dw(:)
     integer(ik) :: n
     do n = 1, size(dw) - 1
@@ -121,9 +121,9 @@ subroutine dw_co_sum(dw)
   end subroutine dw_co_sum
 
   pure elemental subroutine set_activation(self, activation)
-    ! Sets the activation function. Input string must match one of
-    ! provided activation functions, otherwise it defaults to sigmoid.
-    ! If activation not present, defaults to sigmoid.
+    !! Sets the activation function. Input string must match one of
+    !! provided activation functions, otherwise it defaults to sigmoid.
+    !! If activation not present, defaults to sigmoid.
     class(layer_type), intent(in out) :: self
     character(len=*), intent(in) :: activation
     select case(trim(activation))
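The docstrings above describe the constructor and the `set_activation` fallback behavior. A hypothetical usage sketch (not part of this commit; it assumes the module exposes the documented constructor through a `layer_type` interface):

```
program layer_demo
  use mod_layer, only: layer_type
  implicit none
  type(layer_type) :: layer
  ! a 4-neuron layer whose weights feed a 3-neuron next layer
  layer = layer_type(4, 3)
  ! names matching a provided activation are accepted;
  ! anything else falls back to sigmoid, per the docstring
  call layer % set_activation('tanh')
end program layer_demo
```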

src/mod_mnist.f90 (+12, -12)

@@ -1,9 +1,9 @@
 module mod_mnist
 
-  ! Procedures to work with MNIST dataset, usable with data format
-  ! as provided in this repo and not the original data format (idx).
+  !! Procedures to work with MNIST dataset, usable with data format
+  !! as provided in this repo and not the original data format (idx).
 
-  use iso_fortran_env, only: real32 ! TODO make MNIST work with arbitrary precision
+  use iso_fortran_env, only: real32 !! TODO make MNIST work with arbitrary precision
   use mod_io, only: read_binary_file
   use mod_kinds, only: ik, rk
 
@@ -16,20 +16,20 @@ module mod_mnist
 contains
 
   pure function digits(x)
-    ! Returns an array of 10 reals, with zeros everywhere
-    ! and a one corresponding to the input number, for example:
-    ! digits(0) = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
-    ! digits(1) = [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]
-    ! digits(6) = [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]
+    !! Returns an array of 10 reals, with zeros everywhere
+    !! and a one corresponding to the input number, for example:
+    !! digits(0) = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
+    !! digits(1) = [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]
+    !! digits(6) = [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]
     real(rk), intent(in) :: x
     real(rk) :: digits(10)
     digits = 0
     digits(int(x + 1)) = 1
   end function digits
 
   pure function label_digits(labels) result(res)
-    ! Converts an array of MNIST labels into a form
-    ! that can be input to the network_type instance.
+    !! Converts an array of MNIST labels into a form
+    !! that can be input to the network_type instance.
     real(rk), intent(in) :: labels(:)
     real(rk) :: res(10, size(labels))
     integer(ik) :: i
@@ -40,7 +40,7 @@ end function label_digits
 
   subroutine load_mnist(tr_images, tr_labels, te_images,&
                         te_labels, va_images, va_labels)
-    ! Loads the MNIST dataset into arrays.
+    !! Loads the MNIST dataset into arrays.
     real(rk), allocatable, intent(in out) :: tr_images(:,:), tr_labels(:)
     real(rk), allocatable, intent(in out) :: te_images(:,:), te_labels(:)
     real(rk), allocatable, intent(in out), optional :: va_images(:,:), va_labels(:)
@@ -69,7 +69,7 @@ subroutine load_mnist(tr_images, tr_labels, te_images,&
   end subroutine load_mnist
 
   subroutine print_image(images, labels, n)
-    ! Prints a single image and label to screen.
+    !! Prints a single image and label to screen.
     real(rk), intent(in) :: images(:,:), labels(:)
     integer(ik), intent(in) :: n
     real(rk) :: image(28, 28)
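A hypothetical sketch of how the documented MNIST helpers fit together (not part of this commit; it assumes these procedures are public in mod_mnist and that the data files shipped in data/ have been prepared as the README describes):

```
program mnist_demo
  use mod_kinds, only: rk
  use mod_mnist, only: load_mnist, label_digits, print_image
  implicit none
  real(rk), allocatable :: tr_images(:,:), tr_labels(:)
  real(rk), allocatable :: te_images(:,:), te_labels(:)
  real(rk), allocatable :: onehot(:,:)
  call load_mnist(tr_images, tr_labels, te_images, te_labels)
  ! one-hot encode the labels: a 10 x n_images array, per the digits docstring
  onehot = label_digits(tr_labels)
  ! print the first training image and its label to the screen
  call print_image(tr_images, tr_labels, 1)
end program mnist_demo
```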
