#include <string>
#include <sstream>
#include <cassert>

#include "cuRecurrent.h"
#include "cumath.h"

#include "cuda_runtime.h"


namespace TNet
{
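
  /*
   * CuRecurrent: a simple recurrent layer trained with truncated
   * backpropagation through time (BPTT). It operates frame by frame
   * (the asserts below enforce one-row inputs) and computes
   *
   *   y(t) = sigmoid(W^T * [x(t); y(t-1)] + b)
   *
   * where W is mLinearity (rows = inputs+outputs, cols = outputs), b is
   * mBias, and mInputHistory caches the concatenated [x; y_prev] vectors
   * of the recent frames for the BPTT unfolding in Update().
   */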
  void
  CuRecurrent::
  PropagateFnc(const CuMatrix<BaseFloat>& X, CuMatrix<BaseFloat>& Y)
  {
    assert(X.Rows() == 1);
    assert(Y.Rows() == 1);

    if(mInputHistory.Rows() == 0) {
      Error("Bptt order was not set");
    }
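
    // mInputHistory layout (one row per frame, newest first):
    //   row 0 : [ x(t)   ; y(t-1)   ]
    //   row r : [ x(t-r) ; y(t-r-1) ]
    // It must hold at least mBpttOrder+1 rows so that the BPTT loop in
    // Update() can reach back far enough.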
    //push back the history: shift rows 0..N-2 down to rows 1..N-1
    CuMatrix<BaseFloat> tmp(mInputHistory.Rows()-1,mInputHistory.Cols());
    tmp.CopyRows(tmp.Rows(),0,mInputHistory,0);
    mInputHistory.CopyRows(tmp.Rows(),0,tmp,1);

    //compose the new 0th row from the current input X and the previous output Y
    cudaMemcpy(mInputHistory.pCUData(), X.pCUData(),
               sizeof(BaseFloat)*X.Cols(), cudaMemcpyDeviceToDevice);
    cudaMemcpy(mInputHistory.pCUData()+X.Cols(), Y.pCUData(),
               sizeof(BaseFloat)*Y.Cols(), cudaMemcpyDeviceToDevice);
    //extract first row (kept for reference, used by the Gemm variant below)
    //CuMatrix<BaseFloat> first_row(1,mInputHistory.Cols());
    //first_row.CopyRows(1,0,mInputHistory,0);

    //calculate the output (the previous Y was already saved to the history above):
    //start from the bias
    Y.AddScaledRow(1.0,mBias,0.0);
    //propagate the 0th history row [x(t); y(t-1)] through the weights
    CuMath<BaseFloat>::OffsetGemv('T',1.0,mLinearity,mInputHistory.pCUData(),mInputHistory.Cols(),1.0,Y.pCUData(),Y.Cols(),0);
    //Y.Gemm('N','N', 1.0, first_row, mLinearity, 1.0);
    CuMath<BaseFloat>::Sigmoid(Y,Y);
    /*
    std::cout << "-------------------------------------" << std::endl;
    X.Print();
    Y.Print();
    mInputHistory.Print();
    */
  }
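
  /*
   * Backpropagation towards the preceding layer: the incoming error X is
   * passed through the sigmoid derivative and multiplied by the input-side
   * block of mLinearity (the zero offset in OffsetGemv selects the first
   * Y.Cols() rows, i.e. the rows that correspond to the true input x(t);
   * the error through the recurrent connections is handled in Update()).
   */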
  void
  CuRecurrent::
  BackpropagateFnc(const CuMatrix<BaseFloat>& X, CuMatrix<BaseFloat>& Y)
  {
    assert(Y.Rows() == 1);
    assert(X.Rows() == 1);

    //apply the sigmoid derivative to the incoming error
    CuMatrix<BaseFloat> diff_sigm(1,X.Cols());
    CuMath<BaseFloat>::DiffSigmoid(diff_sigm,X,GetOutput());

    //:TODO: inefficient to calculate all the input errors,
    //       we only need a part of them!
    /*
    //backward-multiply by weights
    CuMatrix<BaseFloat> err_prev(1,mLinearity.Rows());
    err_prev.Gemm('N', 'T', 1.0, diff_sigm, mLinearity, 0.0);
    //copy out the interval
    cudaMemcpy(Y.pCUData(),err_prev.pCUData(),
               sizeof(BaseFloat)*Y.Cols(),cudaMemcpyDeviceToDevice);
    */

    //backward-multiply by the weights (input-side rows only)
    CuMath<BaseFloat>::OffsetGemv('N',1.0,mLinearity,diff_sigm.pCUData(),diff_sigm.Cols(),1.0,Y.pCUData(),Y.Cols(),0);
  }
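
  /*
   * Update(): weight update with truncated BPTT. First the gradient for
   * the present frame is accumulated, then the recurrence is unfolded
   * mBpttOrder steps back in time, re-deriving the error at each step and
   * accumulating rank-1 (BlasGer) updates against the matching history rows.
   */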
  void
  CuRecurrent::
  Update()
  {
    //
    //correction from the PRESENT input/error pair
    //
    //apply the sigmoid derivative to the incoming error
    CuMatrix<BaseFloat> diff_sigm(1,GetOutput().Cols());
    CuMath<BaseFloat>::DiffSigmoid(diff_sigm,GetErrorInput(),GetOutput());

    //get the 0th row of the history (present time)
    CuMatrix<BaseFloat> history_row(1,mInputHistory.Cols());
    history_row.CopyRows(1,0,mInputHistory,0);

    //calculate the update
    //mLinearityCorrection.Gemm('T','N',-mLearningRate,history_row,diff_sigm,mMomentum);
    mLinearityCorrection.SetConst(0.0); //:TODO: should be scaled by momentum instead
    CuMath<BaseFloat>::BlasGer(-mLearningRate,history_row.pCUData(),history_row.Cols(),diff_sigm.pCUData(),diff_sigm.Cols(),mLinearityCorrection);
    mBiasCorrection.AddColSum(-mLearningRate,diff_sigm,mMomentum);

    //
    //BPTT (backpropagation through time)
    //
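    // Each iteration steps the error one frame back through the recurrent
    // connections:
    //   delta(t-i) = (W_rec * delta(t-i+1)) .* sigmoid'(y(t-i))
    // where W_rec is the recurrent block of mLinearity, i.e. the rows
    // starting at GetInput().Cols() (hence the row offset passed to
    // OffsetGemv below).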
    CuMatrix<BaseFloat> err_prev(1,mLinearity.Rows()); //only used by the commented-out variant below
    CuMatrix<BaseFloat> err_prev_part(1,diff_sigm.Cols());
    CuMatrix<BaseFloat> history_output(1,GetOutput().Cols());

    for(int i=1; i<=mBpttOrder; i++) {
      //:TODO: inefficient to calculate all the input errors,
      //       we only need a part of them!
      /*
      //get previous error
      err_prev.Gemm('N','T',1.0,diff_sigm,mLinearity,0.0);
      //select interval
      cudaMemcpy(err_prev_part.pCUData(),err_prev.pCUData()+GetNInputs(),
                 sizeof(BaseFloat)*err_prev_part.Cols(),cudaMemcpyDeviceToDevice);
      */

      //backward-multiply by the recurrent block of the weights
      CuMath<BaseFloat>::OffsetGemv('N',1.0,mLinearity,diff_sigm.pCUData(),diff_sigm.Cols(),0.0,err_prev_part.pCUData(),err_prev_part.Cols(),GetInput().Cols());

      //apply the sigmoid derivative with the activations of the HISTORY frame:
      //copy y(t-i), i.e. the output part of history row i-1
      cudaMemcpy(history_output.pCUData(), mInputHistory.pCURowData(i-1)+GetInput().Cols(),
                 sizeof(BaseFloat)*history_output.Cols(), cudaMemcpyDeviceToDevice);
      CuMath<BaseFloat>::DiffSigmoid(diff_sigm,err_prev_part,history_output);

      //get the i-th history row
      history_row.CopyRows(1,i,mInputHistory,0);

      //accumulate the update
      //mLinearityCorrection.Gemm('T','N',-mLearningRate,history_row,diff_sigm,1.0);
      CuMath<BaseFloat>::BlasGer(-mLearningRate,history_row.pCUData(),history_row.Cols(),diff_sigm.pCUData(),diff_sigm.Cols(),mLinearityCorrection);
      mBiasCorrection.AddColSum(-mLearningRate,diff_sigm,1.0);
    }
    //
    //update the weights
    //
    //L2 regularization (weight decay)
    mLinearityCorrection.AddScaled(-mLearningRate*mWeightcost,mLinearity,1.0);

    //perform the update
    mLinearity.AddScaled(1.0,mLinearityCorrection,1.0);
    mBias.AddScaled(1.0,mBiasCorrection,1.0);
  }
  void
  CuRecurrent::
  ReadFromStream(std::istream& rIn)
  {
    //the matrix is stored transposed (SNet convention)
    BfMatrix transpose;
    rIn >> transpose;
    mLinearity.CopyFrom(BfMatrix(transpose, TRANS));

    //biases are stored normally
    BfVector bias;
    rIn >> bias;
    mBias.CopyFrom(bias);
  }
  void
  CuRecurrent::
  WriteToStream(std::ostream& rOut)
  {
    //the matrix is stored transposed (SNet convention)
    BfMatrix tmp;
    mLinearity.CopyTo(tmp);
    BfMatrix transpose(tmp, TRANS);
    rOut << transpose;

    //biases are stored normally
    BfVector vec;
    mBias.CopyTo(vec);
    rOut << vec;
    rOut << std::endl;
  }
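
  /*
   * Minimal usage sketch (hypothetical driver code: the constructor
   * arguments, SetBpttOrder() and the buffer handling are assumptions not
   * shown in this file; only the one-row-per-call contract is fixed by the
   * asserts above):
   *
   *   CuRecurrent rec(n_inputs, n_outputs, NULL);
   *   rec.SetBpttOrder(4);  //must allocate mInputHistory
   *   for(size_t t=0; t<n_frames; t++) {
   *     in_row.CopyRows(1,t,features,0);     //pick the 1xN frame t
   *     rec.Propagate(in_row, out_row);      //forward one frame
   *     rec.Backpropagate(err_row, in_err);  //per-frame error
   *     rec.Update();                        //truncated-BPTT step
   *   }
   */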
} //namespace TNet