Simple Recurrent Neural Network Predictor

Dependents:   WeatherPredictor

Committer:
yukari_hinata
Date:
Thu Feb 19 19:15:04 2015 +0000
Revision:
7:92ea6cefc6a5
Parent:
6:e97ccc643bf1
revised

Who changed what in which revision?

User | Revision | Line number | New contents of line
yukari_hinata 0:0d42047e140c 1 #include "SRNN.hpp"
yukari_hinata 0:0d42047e140c 2
yukari_hinata 0:0d42047e140c 3 /* コンストラクタ - 最小の初期化パラメタ
yukari_hinata 0:0d42047e140c 4 * 適宜追加する可能性あり
yukari_hinata 0:0d42047e140c 5 */
yukari_hinata 1:da597cb284a2 6 SRNN::SRNN(int dim,
yukari_hinata 0:0d42047e140c 7 int num_mid,
yukari_hinata 0:0d42047e140c 8 int len_seq,
yukari_hinata 1:da597cb284a2 9 int len_predict,
yukari_hinata 0:0d42047e140c 10 float* input_sample,
yukari_hinata 0:0d42047e140c 11 float* input_sample_maxmin)
yukari_hinata 0:0d42047e140c 12 {
yukari_hinata 1:da597cb284a2 13
yukari_hinata 1:da597cb284a2 14 this->dim_signal = dim;
yukari_hinata 1:da597cb284a2 15 this->num_mid_neuron = num_mid; // Advice : number of hidden layter shuld be as large as possible.
yukari_hinata 1:da597cb284a2 16 this->len_seqence = len_seq;
yukari_hinata 1:da597cb284a2 17 this->len_predict = len_predict;
yukari_hinata 1:da597cb284a2 18
yukari_hinata 1:da597cb284a2 19 // sample/sample_maxmin allocation
yukari_hinata 1:da597cb284a2 20 this->sample = new float[len_seqence * dim_signal];
yukari_hinata 5:026d42b4455f 21 this->norm_sample = new float[len_seqence * dim_signal];
yukari_hinata 1:da597cb284a2 22 this->sample_maxmin = new float[dim_signal * 2];
yukari_hinata 1:da597cb284a2 23
yukari_hinata 1:da597cb284a2 24 memcpy(this->sample, input_sample, sizeof(float) * len_seqence * dim_signal);
yukari_hinata 1:da597cb284a2 25 memcpy(this->sample_maxmin, input_sample_maxmin, sizeof(float) * dim_signal * 2);
yukari_hinata 1:da597cb284a2 26
yukari_hinata 1:da597cb284a2 27 this->predict_signal = new float[dim_signal * len_predict];
yukari_hinata 0:0d42047e140c 28
yukari_hinata 1:da597cb284a2 29 // coffecience matrix allocation
yukari_hinata 1:da597cb284a2 30 // final +1 for bias
yukari_hinata 1:da597cb284a2 31 this->Win_mid = new float[num_mid_neuron * (dim_signal + num_mid_neuron + 1)];
yukari_hinata 1:da597cb284a2 32 this->Wmid_out = new float[dim_signal * (num_mid_neuron + 1)];
yukari_hinata 1:da597cb284a2 33
yukari_hinata 1:da597cb284a2 34 // input/hidden layer signal allocation
yukari_hinata 1:da597cb284a2 35 expand_in_signal = new float[dim_signal + num_mid_neuron + 1];
yukari_hinata 1:da597cb284a2 36 expand_mid_signal = new float[num_mid_neuron + 1];
yukari_hinata 0:0d42047e140c 37
yukari_hinata 5:026d42b4455f 38 // For BUG FIX...
yukari_hinata 5:026d42b4455f 39 // 係数行列のサイズ
yukari_hinata 5:026d42b4455f 40 int row_in_mid = num_mid_neuron;
yukari_hinata 5:026d42b4455f 41 int col_in_mid = dim_signal + num_mid_neuron + 1;
yukari_hinata 5:026d42b4455f 42 int row_mid_out = dim_signal;
yukari_hinata 5:026d42b4455f 43 int col_mid_out = num_mid_neuron + 1;
yukari_hinata 5:026d42b4455f 44
yukari_hinata 5:026d42b4455f 45 // 行列のアロケート
yukari_hinata 5:026d42b4455f 46 // 係数行列の更新量
yukari_hinata 5:026d42b4455f 47 this->dWin_mid = new float[row_in_mid * col_in_mid];
yukari_hinata 5:026d42b4455f 48 this->dWmid_out = new float[row_mid_out * col_mid_out];
yukari_hinata 5:026d42b4455f 49
yukari_hinata 5:026d42b4455f 50 // 前回の更新量:慣性項に用いる.
yukari_hinata 5:026d42b4455f 51 this->prevdWin_mid = new float[row_in_mid * col_in_mid];
yukari_hinata 5:026d42b4455f 52 this->prevdWmid_out = new float[row_mid_out * col_mid_out];
yukari_hinata 5:026d42b4455f 53 // 出力層の信号
yukari_hinata 5:026d42b4455f 54 this->out_signal = new float[dim_signal];
yukari_hinata 5:026d42b4455f 55 // 入力層->中間層の信号和
yukari_hinata 5:026d42b4455f 56 this->in_mid_net = new float[num_mid_neuron];
yukari_hinata 5:026d42b4455f 57 // 中間層->出力層の信号和.
yukari_hinata 5:026d42b4455f 58 this->mid_out_net = new float[dim_signal];
yukari_hinata 5:026d42b4455f 59 // 誤差信号
yukari_hinata 5:026d42b4455f 60 this->sigma = new float[dim_signal];
yukari_hinata 5:026d42b4455f 61
yukari_hinata 1:da597cb284a2 62 // Parameter settings (Tuning by taiyo)
yukari_hinata 1:da597cb284a2 63 this->squareError = FLT_MAX; // (large value)
yukari_hinata 1:da597cb284a2 64 this->maxIteration = 5000;
yukari_hinata 1:da597cb284a2 65 this->goalError = float(0.001);
yukari_hinata 1:da597cb284a2 66 this->epsilon = float(0.00001);
yukari_hinata 1:da597cb284a2 67 this->learnRate = float(0.9); // 敏感に反応できるように, 高めに設定した. 時系列データなので, サンプルの時間間隔によって変えるべき
yukari_hinata 1:da597cb284a2 68 this->alpha = float(0.8 * learnRate);
yukari_hinata 1:da597cb284a2 69 this->alpha_context = float(0.8);
yukari_hinata 1:da597cb284a2 70 this->width_initW = float(1.0/num_mid_neuron);
yukari_hinata 0:0d42047e140c 71
yukari_hinata 1:da597cb284a2 72 // random seed decide by time
yukari_hinata 1:da597cb284a2 73 srand((unsigned int)time(NULL));
yukari_hinata 1:da597cb284a2 74
yukari_hinata 0:0d42047e140c 75 }
yukari_hinata 0:0d42047e140c 76
yukari_hinata 0:0d42047e140c 77 SRNN::~SRNN(void)
yukari_hinata 0:0d42047e140c 78 {
yukari_hinata 4:9d94330f380a 79 delete [] sample;
yukari_hinata 5:026d42b4455f 80 delete [] norm_sample;
yukari_hinata 4:9d94330f380a 81 delete [] sample_maxmin;
yukari_hinata 4:9d94330f380a 82 delete [] predict_signal;
yukari_hinata 4:9d94330f380a 83 delete [] Win_mid;
yukari_hinata 4:9d94330f380a 84 delete [] Wmid_out;
yukari_hinata 4:9d94330f380a 85 delete [] expand_in_signal;
yukari_hinata 0:0d42047e140c 86 delete [] expand_mid_signal;
yukari_hinata 0:0d42047e140c 87 }
yukari_hinata 0:0d42047e140c 88
yukari_hinata 1:da597cb284a2 89 /* utilに移動するべき */
yukari_hinata 0:0d42047e140c 90 void SRNN::sigmoid_vec(float* net,
yukari_hinata 0:0d42047e140c 91 float* out,
yukari_hinata 0:0d42047e140c 92 int dim)
yukari_hinata 0:0d42047e140c 93 {
yukari_hinata 1:da597cb284a2 94 for (int n=0; n<dim; n++)
yukari_hinata 1:da597cb284a2 95 out[n] = sigmoid_func(net[n]);
yukari_hinata 0:0d42047e140c 96 }
yukari_hinata 0:0d42047e140c 97
/* Predict: run the trained network forward to forecast the next
 * len_predict steps of the input sequence.
 * input : one dim_signal-length sample; the first step is driven by it,
 *         and every later step feeds the previous (normalized) output back.
 * Results are de-normalized and written into predict_signal
 * (len_predict rows of dim_signal values).
 * NOTE(review): the locals below shadow member buffers of the same names
 * allocated in the constructor; the members are not touched here. */
void SRNN::predict(float* input)
{

    float *norm_input = new float[this->dim_signal];
    // output signal
    float* out_signal = new float[dim_signal];
    // value of network in input->hidden layer
    float* in_mid_net = new float[num_mid_neuron];
    // value of network in hidden->output layer
    float* mid_out_net = new float[dim_signal];

    /* Calcurate output signal */
    for (int i_predict = 0; i_predict < len_predict; i_predict++) {

        // Normalize the signal into the network's working range.
        for (int n=0; n < dim_signal; n++) {
            if (i_predict == 0) {
                // First step: use the caller-supplied input.
                norm_input[n] = normalize_signal(input[n], MATRIX_AT(this->sample_maxmin,2,n,0), MATRIX_AT(this->sample_maxmin,2,n,1));
            } else {
                // Later steps: feed back the previous (already normalized) output.
                norm_input[n] = out_signal[n];
            }
        }

        // Load the signal part of the expanded input vector.
        memcpy(expand_in_signal, norm_input, sizeof(float) * dim_signal);
        // Context part (SRNN recurrence): pass the linear mix of the previous
        // context and the previous hidden-layer signal through the sigmoid.
        for (int d = 0; d < num_mid_neuron; d++) {
            expand_in_signal[dim_signal + d] = sigmoid_func(alpha_context * expand_in_signal[dim_signal + d] + expand_mid_signal[d]);
        }
        // Bias fixed at 1.
        expand_in_signal[dim_signal + num_mid_neuron] = 1;

        // Net sum of the input->hidden layer.
        multiply_mat_vec(Win_mid, expand_in_signal, in_mid_net, num_mid_neuron, dim_signal + num_mid_neuron + 1);
        // Hidden-layer output signal.
        sigmoid_vec(in_mid_net, expand_mid_signal, num_mid_neuron);
        expand_mid_signal[num_mid_neuron] = 1;  // hidden-layer bias term

        // Net sum of the hidden->output layer.
        multiply_mat_vec(Wmid_out, expand_mid_signal, mid_out_net, dim_signal, num_mid_neuron + 1);
        // Output-layer output signal.
        sigmoid_vec(mid_out_net, out_signal, dim_signal);

        // Expand the output signal back to its original value range.
        // (sample_maxmin[n*2] / [n*2+1] are the same cells as
        //  MATRIX_AT(sample_maxmin,2,n,0) / (...,n,1) used above.)
        for (int n=0; n < dim_signal; n++) {
            predict_signal[i_predict * dim_signal + n] = expand_signal(out_signal[n],sample_maxmin[n * 2],sample_maxmin[n * 2 + 1]);
        }

    }

    // Release the local work buffers.
    delete [] norm_input;
    delete [] out_signal;
    delete [] in_mid_net;
    delete [] mid_out_net;

}
yukari_hinata 0:0d42047e140c 158
/* Training by error back-propagation. (Local minima? Never heard of 'em.)
 * Trains the network to predict the NEXT element of the stored sample
 * sequence; returns the final mean squared error. */
float SRNN::learning(void)
{
    int iteration = 0; // number of learning iterations performed
    int seq = 0;       // index of the sequence element currently being learned [0,...,len_seqence-1]
    int end_flag = 0;  // termination flag; once set, the current sequence is run to its end, then learning stops
    // Sizes of the weight matrices.
    int row_in_mid = num_mid_neuron;
    int col_in_mid = dim_signal + num_mid_neuron + 1;
    int row_mid_out = dim_signal;
    int col_mid_out = num_mid_neuron + 1;

    // Initialize the weight matrices with small uniform random values.
    for (int i=0; i < row_in_mid; i++)
        for (int j=0; j < col_in_mid; j++)
            MATRIX_AT(Win_mid,col_in_mid,i,j) = uniform_rand(width_initW);

    for (int i=0; i < row_mid_out; i++)
        for (int j=0; j < col_mid_out; j++)
            MATRIX_AT(Wmid_out,col_mid_out,i,j) = uniform_rand(width_initW);


    // Normalize the signal: empirically a very important step.
    float buf_float;  // NOTE(review): unused local - candidate for removal
    for (int i_seq=0; i_seq < len_seqence; i_seq++) {
        for (int dim_n=0; dim_n < dim_signal; dim_n++) {
            MATRIX_AT(norm_sample,dim_signal,i_seq,dim_n)
                = normalize_signal(MATRIX_AT(this->sample,dim_signal,i_seq,dim_n),
                                   MATRIX_AT(this->sample_maxmin,2,dim_n,0),
                                   MATRIX_AT(this->sample_maxmin,2,dim_n,1));
        }
        // printf("%f %f %f \r\n", MATRIX_AT(norm_sample,dim_signal,i_seq,0), MATRIX_AT(norm_sample,dim_signal,i_seq,1),MATRIX_AT(norm_sample,dim_signal,i_seq,2));
    }

    // Previous squared error: used for the convergence test.
    // NOTE(review): prevError is first assigned only when iteration >= 1, yet
    // it is compared against squareError at the end of iteration 0 as well -
    // that first comparison reads an indeterminate value; confirm and
    // initialize (e.g. to FLT_MAX).
    float prevError;
    squareError = FLT_MAX;
    /* Learning loop */
    while (1) {

        // printf("dWin_out : %p, dWmid_out : %p, prevdWin_mid : %p, prevdWmid_out : %p, out_signal : %p, in_mid_net : %p, mid_out_net : %p, sigma : %p \r\n", dWin_mid, dWmid_out,prevdWin_mid, prevdWmid_out, out_signal, in_mid_net, mid_out_net, sigma);

        // Check whether the termination condition holds: stop when the
        // iteration cap is hit, or when (after one full pass) the error goal
        // is reached.
        if (!end_flag) {
            end_flag = !(iteration < maxIteration
                         && (iteration <= len_seqence
                             || squareError > goalError)
                        );
        }

        // printf("ite:%d err:%f \r\n", iteration, squareError);

        // If the end of the sequence was reached, restart from the beginning.
        if (seq == len_seqence && !end_flag) {
            seq = 0;
        }

        // Save the previous weight deltas / squared error.
        if (iteration >= 1) {
            memcpy(prevdWin_mid, dWin_mid, sizeof(float) * row_in_mid * col_in_mid);
            memcpy(prevdWmid_out, dWmid_out, sizeof(float) * row_mid_out * col_mid_out);
            prevError = squareError;
        } else {
            // First iteration: zero-fill.
            memset(prevdWin_mid, float(0), sizeof(float) * row_in_mid * col_in_mid);
            memset(prevdWmid_out, float(0), sizeof(float) * row_mid_out * col_mid_out);
        }

        // Clear this iteration's deltas.
        memset(dWin_mid, float(0), sizeof(float) * row_in_mid * col_in_mid);
        memset(dWmid_out, float(0), sizeof(float) * row_mid_out * col_mid_out);

        /* Learning step 1: compute the network's output signal. */

        // Fetch the input values (normalized sample at position seq).
        memcpy(expand_in_signal, &(norm_sample[(seq * dim_signal)]), sizeof(float) * dim_signal);

        /*
        if ( iteration >= 0 ) {
        printf("%d first 3 norm_sample : %f(%p) %f(%p) %f(%p) \r\n", iteration, norm_sample[seq * dim_signal], &(norm_sample[seq * dim_signal]), norm_sample[seq * dim_signal + 1], &(norm_sample[seq * dim_signal + 1]), norm_sample[seq * dim_signal + 2], &(norm_sample[seq * dim_signal + 2]));
        printf("%d first 3 expand_in_signal : %f(%p) %f(%p) %f(%p) \r\n", iteration, expand_in_signal[0], &(expand_in_signal[0]), expand_in_signal[1], &(expand_in_signal[1]), expand_in_signal[2], &(expand_in_signal[2]));
        }
        */

        // SRNN-specific: a copy of the hidden layer is appended to the input
        // layer (context units) and fed into the hidden layer.
        if (iteration == 0) {
            // First iteration: zero-fill the context units.
            memset(&(expand_in_signal[dim_signal]), float(0), sizeof(float) * num_mid_neuron);
        } else {
            // Context layer = previous context output: pass the linear mix
            // with the previous hidden-layer signal through the sigmoid.
            for (int d_in = 0; d_in < num_mid_neuron; d_in++) {
                expand_in_signal[dim_signal + d_in] = sigmoid_func(alpha_context * expand_in_signal[dim_signal + d_in] + expand_mid_signal[d_in]);
            }
        }

        // printf("%d matrix calc start. \r\n", iteration);

        // Bias term is always fixed at 1.
        expand_in_signal[dim_signal + num_mid_neuron] = 1;
        // printf(" in bias OK \r\n");
        // Net sum of the input->hidden layer.
        multiply_mat_vec(Win_mid,
                         expand_in_signal,
                         in_mid_net,
                         num_mid_neuron,
                         dim_signal + num_mid_neuron + 1);
        // printf(" in->mid OK \r\n");
        // Hidden-layer output signal.
        sigmoid_vec(in_mid_net,
                    expand_mid_signal,
                    num_mid_neuron);
        // printf(" mid sigmoid OK \r\n");
        expand_mid_signal[num_mid_neuron] = 1;  // hidden-layer bias
        // printf(" mid bias OK \r\n");
        // Net sum of the hidden->output layer.
        multiply_mat_vec(Wmid_out,
                         expand_mid_signal,
                         mid_out_net,
                         dim_signal,
                         num_mid_neuron + 1);
        // printf(" mid->out OK \r\n");
        // Output-layer output signal.
        sigmoid_vec(mid_out_net,
                    out_signal,
                    dim_signal);
        // printf(" out sigmoid OK \r\n");

        // De-normalize the current output into predict_signal.
        for (int i_dim = 0; i_dim < dim_signal; i_dim++) {
            predict_signal[i_dim] = expand_signal(out_signal[i_dim],
                                                  MATRIX_AT(sample_maxmin,2,i_dim,0),
                                                  MATRIX_AT(sample_maxmin,2,i_dim,1));
        }

        // printf("%d sample : %f %f %f \r\n", seq, sample[dim_signal * seq], sample[dim_signal * seq + 1], sample[dim_signal * seq + 2]);
        // printf("%d output : %f(%p) %f(%p) %f(%p) \r\n", iteration, out_signal[0], &(out_signal[0]), out_signal[1], &(out_signal[1]), out_signal[2], &(out_signal[2]));
        // printf("%d predict : %f %f %f \r\n", iteration, predict_signal[0], predict_signal[1], predict_signal[2]);

        // Squared error at this point.
        squareError = 0;
        // The error is taken against the NEXT element of the sequence!!
        // ==> i.e. the network is being trained to predict the next step.
        for (int n = 0; n < dim_signal; n++) {
            if (seq < len_seqence - 1) {
                squareError += powf((out_signal[n] - MATRIX_AT(norm_sample,dim_signal,(seq + 1),n)),2);
            } else {
                squareError += powf((out_signal[n] - MATRIX_AT(norm_sample,dim_signal,0,n)),2);
            }
        }
        squareError /= dim_signal;
        // printf("%f \r\n", squareError);
        // Abort if the error diverged (NaN/Inf).
        if ( isnan(squareError) || isinf(squareError) ) {
            fprintf( stderr, "SRNN LEARNING ERROR!! Learning failed (detected %f) \r\n", squareError);
            exit(1);
        }

        /* End of learning */
        // Stop when the termination flag is set AND we are at the last
        // element of the sequence.
        if (end_flag && (seq == (len_seqence-1))) {
            // Store the prediction result.
            for (int i = 0; i < dim_signal; i++) {
                predict_signal[i] = expand_signal(out_signal[i],
                                                  MATRIX_AT(sample_maxmin,2,i,0),
                                                  MATRIX_AT(sample_maxmin,2,i,1));
                // printf("%f ", predict_signal[i]);
            }
            break;
        }

        // If the error change is below epsilon, treat as converged and raise
        // the termination flag. (See prevError note above: on iteration 0
        // this compares against an indeterminate value.)
        if (fabsf(squareError - prevError) < epsilon) {
            end_flag = 1;
        }

        /*
        if ( iteration > 40 ) {
        printf("%d sample : %f %f %f \r\n", seq, sample[dim_signal * seq], sample[dim_signal * seq + 1], sample[dim_signal * seq + 2]);
        printf("%d norm_sample : %f %f %f \r\n", iteration, MATRIX_AT(norm_sample,dim_signal,seq,0), MATRIX_AT(norm_sample,dim_signal,seq,1), MATRIX_AT(norm_sample,dim_signal,seq,2));
        }
        */

        /* Learning step 2: back-propagation. */
        // Compute the error signal (delta of the output layer).
        for (int n_dim = 0; n_dim < dim_signal; n_dim++) {
            if (seq < len_seqence - 1) {
                sigma[n_dim] = (out_signal[n_dim] - MATRIX_AT(norm_sample,dim_signal,(seq+1),n_dim)) * out_signal[n_dim] * (1 - out_signal[n_dim]);
            } else {
                /* Take the error between the last and the first element (usually large). */
                sigma[n_dim] = (out_signal[n_dim] - MATRIX_AT(norm_sample,dim_signal,0,n_dim)) * out_signal[n_dim] * (1 - out_signal[n_dim]);
            }
        }
        // printf("%d Sigma : %f %f %f \r\n", iteration, sigma[0], sigma[1], sigma[2]);

        // Delta of the hidden->output weights.
        // NOTE(review): this write uses row width num_mid_neuron, but the
        // update below reads with width col_mid_out (= num_mid_neuron + 1) -
        // the two layouts disagree; verify against the MATRIX_AT macro.
        for (int n = 0; n < dim_signal; n++) {
            for (int j = 0; j < num_mid_neuron + 1; j++) {
                MATRIX_AT(dWmid_out,num_mid_neuron,n,j) = sigma[n] * expand_mid_signal[j];
            }
        }

        // Delta of the input->hidden weights.
        float sum_sigma;
        for (int i = 0; i < num_mid_neuron; i++) {
            // Propagate the error signal backwards through Wmid_out.
            sum_sigma = 0;
            for (int k = 0; k < dim_signal; k++) {
                sum_sigma += sigma[k] * MATRIX_AT(Wmid_out,num_mid_neuron + 1,k,i);
            }
            // Delta of the input->hidden weights.
            // NOTE(review): written as MATRIX_AT(dWin_mid,num_mid_neuron,j,i)
            // but read back below as MATRIX_AT(dWin_mid,col_in_mid,i,j) -
            // width and index order look transposed; verify.
            for (int j = 0; j < col_in_mid; j++) {
                MATRIX_AT(dWin_mid,num_mid_neuron,j,i)
                    = sum_sigma * expand_mid_signal[i] *
                      (1 - expand_mid_signal[i]) *
                      expand_in_signal[j];
            }
        }

        // Apply the weight updates (gradient step plus momentum term).
        for (int i = 0; i < row_in_mid; i++) {
            for (int j = 0; j < col_in_mid; j++) {
                //printf("[%f -> ", MATRIX_AT(Win_mid,col_in_mid,i,j));
                MATRIX_AT(Win_mid,col_in_mid,i,j) =
                    MATRIX_AT(Win_mid,col_in_mid,i,j) -
                    this->learnRate * MATRIX_AT(dWin_mid,col_in_mid,i,j) -
                    this->alpha * MATRIX_AT(prevdWin_mid,col_in_mid,i,j);
                // printf("%f] ", MATRIX_AT(Win_mid,col_in_mid,i,j));
                // printf("dW : %f , prevdW : %f ", MATRIX_AT(dWin_mid,col_in_mid,i,j), MATRIX_AT(prevdWin_mid,col_in_mid,i,j));
            }
            //printf("\r\n");
        }
        for (int i = 0; i < row_mid_out; i++) {
            for (int j = 0; j < col_mid_out; j++) {
                MATRIX_AT(Wmid_out,col_mid_out,i,j)=
                    MATRIX_AT(Wmid_out,col_mid_out,i,j) -
                    this->learnRate * MATRIX_AT(dWmid_out,col_mid_out,i,j) -
                    this->alpha * MATRIX_AT(prevdWmid_out,col_mid_out,i,j);
            }
        }

        // Increment the iteration count and the sequence index.
        iteration += 1;
        seq += 1;

    }

    return squareError;
}
yukari_hinata 2:d623e7ef4dca 405
yukari_hinata 2:d623e7ef4dca 406 // サンプルの(リ)セット
yukari_hinata 2:d623e7ef4dca 407 void SRNN::set_sample(float* sample_data)
yukari_hinata 2:d623e7ef4dca 408 {
yukari_hinata 5:026d42b4455f 409 memcpy(this->sample, sample_data, sizeof(float) * len_seqence * dim_signal);
yukari_hinata 2:d623e7ef4dca 410 }