@@ -2,20 +2,23 @@
 use nf_base_layer, only: base_layer
 use nf_random, only: random_normal
 implicit none
+
 contains
+
   module function linear2d_layer_cons(out_features) result(res)
     integer, intent(in) :: out_features
     type(linear2d_layer) :: res

     res % out_features = out_features
   end function linear2d_layer_cons

+
   module subroutine init(self, input_shape)
     class(linear2d_layer), intent(in out) :: self
     integer, intent(in) :: input_shape(:)

     if (size(input_shape) /= 2) then
-      error stop "Linear2D Layer accepts 2D input"
+      error stop "linear2d layer requires 2D input."
     end if
     self % sequence_length = input_shape(1)
     self % in_features = input_shape(2)
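To make the constructor and the shape contract above concrete, here is a minimal driver sketch. It is hypothetical and not part of this commit: it assumes the parent module is named nf_linear2d_layer and that the linear2d_layer constructor and the type-bound init/get_num_params shown in this diff are publicly accessible.

! Hypothetical driver (not part of this commit): exercises the constructor
! and the 2-D shape check in init.
program check_linear2d_init
  use nf_linear2d_layer, only: linear2d_layer   ! assumed parent module name
  implicit none
  type(linear2d_layer) :: layer

  layer = linear2d_layer(4)            ! out_features = 4
  call layer % init([3, 5])            ! sequence_length = 3, in_features = 5
  print *, layer % get_num_params()    ! prints 5*4 + 4 = 24
  ! call layer % init([3, 5, 7])       ! would fail: "linear2d layer requires 2D input."
end program check_linear2d_init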
@@ -30,40 +33,45 @@ module subroutine init(self, input_shape)
     call random_normal(self % biases)

     allocate(self % dw(self % in_features, self % out_features))
-    self % dw = 0.0
+    self % dw = 0
     allocate(self % db(self % out_features))
-    self % db = 0.0
+    self % db = 0
+
   end subroutine init

+
   pure module subroutine forward(self, input)
     class(linear2d_layer), intent(in out) :: self
     real, intent(in) :: input(:, :)
     integer :: i

-    self % output(:, :) = matmul(input(:, :), self % weights)
-    do concurrent(i = 1: self % sequence_length)
-      self % output(i, :) = self % output(i, :) + self % biases
+    self % output(:,:) = matmul(input(:,:), self % weights)
+    do concurrent(i = 1:self % sequence_length)
+      self % output(i,:) = self % output(i,:) + self % biases
     end do
+
   end subroutine forward
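In matrix terms, the forward pass above is one affine map applied to every row of the sequence: with input X of shape (sequence_length x in_features), weights W of shape (in_features x out_features), and bias b of length out_features,

    Y(i,j) = sum_k X(i,k) * W(k,j) + b(j),   i = 1, ..., sequence_length

so the do concurrent loop exists only to broadcast b across the rows of the matmul result.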

+
   pure module subroutine backward(self, input, gradient)
     class(linear2d_layer), intent(in out) :: self
-    real, intent(in) :: input(:, :)
-    real, intent(in) :: gradient(:, :)
+    real, intent(in) :: input(:,:)
+    real, intent(in) :: gradient(:,:)
     real :: db(self % out_features)
     real :: dw(self % in_features, self % out_features)
     integer :: i

-    self % dw = self % dw + matmul(transpose(input(:, :)), gradient(:, :))
-    self % db = self % db + sum(gradient(:, :), 1)
-    self % gradient(:, :) = matmul(gradient(:, :), transpose(self % weights))
+    self % dw = self % dw + matmul(transpose(input(:,:)), gradient(:,:))
+    self % db = self % db + sum(gradient(:,:), 1)
+    self % gradient(:,:) = matmul(gradient(:,:), transpose(self % weights))
   end subroutine backward
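The three accumulation lines above are the standard dense-layer backprop equations, restated here with shapes. Writing X for the input, G for the incoming gradient, and W for the weights:

    dW <- dW + matmul(transpose(X), G)   ! (in_features x out_features)
    db <- db + sum(G, dim=1)             ! column sums, one per output feature
    dX  =      matmul(G, transpose(W))   ! stored in self % gradient for the previous layer

As a quick sanity check: with sequence_length = 3 and G identically 1.0, one call adds exactly 3.0 to every component of db.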

+
   pure module function get_num_params(self) result(num_params)
     class(linear2d_layer), intent(in) :: self
     integer :: num_params

-    ! Number of weigths times number of biases
+    ! Number of weights plus number of biases
     num_params = self % in_features * self % out_features + self % out_features

   end function get_num_params
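A worked instance of the count above: with in_features = 5 and out_features = 4, the layer holds a 5 x 4 weight matrix and a length-4 bias vector, so get_num_params() returns 5*4 + 4 = 24.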
@@ -122,4 +130,5 @@ module subroutine set_params(self, params)
     end associate

   end subroutine set_params
+
 end submodule nf_linear2d_layer_submodule
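To close, a sketch of how the flat parameter interface ending above is typically driven. This round-trip is hypothetical and not part of this commit; it assumes the parent module also exposes a get_params counterpart to set_params (elided from this diff) that returns all weights and biases as one flat array.

! Hypothetical round-trip (not part of this commit): flatten, modify, restore.
program roundtrip_linear2d_params
  use nf_linear2d_layer, only: linear2d_layer   ! assumed parent module name
  implicit none
  type(linear2d_layer) :: layer
  real, allocatable :: params(:)

  layer = linear2d_layer(4)
  call layer % init([3, 5])

  params = layer % get_params()     ! assumed counterpart to set_params
  params = 0.5 * params             ! e.g. scaled externally by an optimizer
  call layer % set_params(params)   ! write the flat vector back

  print *, size(params) == layer % get_num_params()   ! T
end program roundtrip_linear2d_params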