layer_sigmoid.go
package torch

import (
	"github.com/Jimmy2099/torch/data_store/tensor"
	math "github.com/chewxy/math32"
)
// SigmoidLayer applies the element-wise sigmoid activation. It caches the
// forward output so the backward pass can evaluate the derivative at the
// correct point.
type SigmoidLayer struct {
	output *tensor.Tensor // result of the most recent Forward call
}

func NewSigmoidLayer() *SigmoidLayer {
	return &SigmoidLayer{}
}
// Sigmoid computes 1 / (1 + e^-x) for a single value.
func Sigmoid(x float32) float32 {
	return 1.0 / (1.0 + math.Exp(-x))
}

// SigmoidDerivative computes the derivative sigma(x) * (1 - sigma(x)).
func SigmoidDerivative(x float32) float32 {
	s := Sigmoid(x)
	return s * (1.0 - s)
}
// Forward applies the sigmoid element-wise and caches the output tensor for
// use in the backward pass.
func (s *SigmoidLayer) Forward(input *tensor.Tensor) *tensor.Tensor {
	shape := input.GetShape()
	outputData := make([]float32, len(input.Data))
	for i := 0; i < len(input.Data); i++ {
		outputData[i] = Sigmoid(input.Data[i])
	}
	s.output = tensor.NewTensor(outputData, shape)
	return s.output
}
// Backward propagates gradOutput through the sigmoid. The derivative must be
// evaluated at the layer's input, which for sigmoid equals out*(1-out) on the
// cached forward output, so no separate input cache is needed. learningRate
// is unused because this layer has no trainable parameters.
func (s *SigmoidLayer) Backward(gradOutput *tensor.Tensor, learningRate float32) *tensor.Tensor {
	shape := gradOutput.GetShape()
	gradInputData := make([]float32, len(gradOutput.Data))
	for i := 0; i < len(gradOutput.Data); i++ {
		out := s.output.Data[i]
		gradInputData[i] = gradOutput.Data[i] * out * (1.0 - out)
	}
	return tensor.NewTensor(gradInputData, shape)
}
// ZeroGrad is a no-op: the sigmoid layer holds no gradients of its own.
func (s *SigmoidLayer) ZeroGrad() {}

// Parameters returns an empty slice: the sigmoid layer is parameter-free.
func (s *SigmoidLayer) Parameters() []*tensor.Tensor {
	return []*tensor.Tensor{}
}
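
A minimal usage sketch follows, assuming only what the file itself shows: tensor.NewTensor takes a flat float32 slice plus a shape, Data exposes the flattened values, and the root import path matches the tensor package's repository. The values are chosen so the result is easy to check by hand: at x = 0 the sigmoid is 0.5 and its derivative is 0.25.

package main

import (
	"fmt"

	"github.com/Jimmy2099/torch" // assumed root import path of this repository
	"github.com/Jimmy2099/torch/data_store/tensor"
)

func main() {
	layer := torch.NewSigmoidLayer()

	// Sigmoid(0) = 0.5, so every output element should be 0.5.
	input := tensor.NewTensor([]float32{0, 0, 0, 0}, []int{2, 2})
	output := layer.Forward(input)
	fmt.Println(output.Data) // expected: [0.5 0.5 0.5 0.5]

	// With an all-ones upstream gradient, each input gradient is
	// out*(1-out) = 0.25 at out = 0.5.
	gradOutput := tensor.NewTensor([]float32{1, 1, 1, 1}, []int{2, 2})
	gradInput := layer.Backward(gradOutput, 0.01) // learning rate is ignored by this layer
	fmt.Println(gradInput.Data) // expected: [0.25 0.25 0.25 0.25]
}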