Simple Recurrent Neural Network Predictor
Diff: SRNN.cpp
- Revision: 4:9d94330f380a
- Parent: 2:d623e7ef4dca
- Child: 5:026d42b4455f
--- a/SRNN.cpp Mon Feb 16 07:53:23 2015 +0000 +++ b/SRNN.cpp Wed Feb 18 15:01:17 2015 +0000 @@ -51,9 +51,12 @@ SRNN::~SRNN(void) { - delete [] sample; delete [] sample_maxmin; - delete [] predict_signal; delete [] Win_mid; - delete [] Wmid_out; delete [] expand_in_signal; + delete [] sample; + delete [] sample_maxmin; + delete [] predict_signal; + delete [] Win_mid; + delete [] Wmid_out; + delete [] expand_in_signal; delete [] expand_mid_signal; } @@ -70,7 +73,7 @@ void SRNN::predict(float* input) { float *norm_input = new float[this->dim_signal]; - + // output signal float* out_signal = new float[dim_signal]; // value of network in input->hidden layer @@ -116,12 +119,14 @@ for (int n=0; n < dim_signal; n++) { predict_signal[i_predict * dim_signal + n] = expand_signal(out_signal[n],sample_maxmin[n * 2],sample_maxmin[n * 2 + 1]); } - + } - + // 領域解放 - delete [] norm_input; delete [] out_signal; - delete [] in_mid_net; delete [] mid_out_net; + delete [] norm_input; + delete [] out_signal; + delete [] in_mid_net; + delete [] mid_out_net; } @@ -141,11 +146,24 @@ // 係数行列の更新量 float* dWin_mid = new float[row_in_mid * col_in_mid]; float* dWmid_out = new float[row_mid_out * col_mid_out]; + // 前回の更新量:慣性項に用いる. float* prevdWin_mid = new float[row_in_mid * col_in_mid]; float* prevdWmid_out = new float[row_mid_out * col_mid_out]; + float* norm_sample = new float[len_seqence * dim_signal]; // 正規化したサンプル信号; 実際の学習は正規化した信号を用います. + // 出力層の信号 + float* out_signal = new float[dim_signal]; + + // 入力層->中間層の信号和 + float* in_mid_net = new float[num_mid_neuron]; + // 中間層->出力層の信号和. 
+ float* mid_out_net = new float[dim_signal]; + + // 誤差信号 + float* sigma = new float[dim_signal]; + // 係数行列の初期化 for (int i=0; i < row_in_mid; i++) for (int j=0; j < col_in_mid; j++) @@ -156,28 +174,17 @@ MATRIX_AT(Wmid_out,col_mid_out,i,j) = uniform_rand(width_initW); // 信号の正規化:経験上,非常に大切な処理 - for (int seq=0; seq < len_seqence; seq++) { - for (int n=0; n < dim_signal; n++) { - MATRIX_AT(norm_sample,dim_signal,seq,n) = - normalize_signal(MATRIX_AT(this->sample,dim_signal,seq,n), - MATRIX_AT(this->sample_maxmin,2,n,0), - MATRIX_AT(this->sample_maxmin,2,n,1)); - // printf("%f ", MATRIX_AT(norm_sample,dim_signal,seq,n)); + for (int i_seq=0; i_seq < len_seqence; i_seq++) { + for (int dim_n=0; dim_n < dim_signal; dim_n++) { + MATRIX_AT(norm_sample,dim_signal,i_seq,dim_n) = + normalize_signal(MATRIX_AT(this->sample,dim_signal,i_seq,dim_n), + MATRIX_AT(this->sample_maxmin,2,dim_n,0), + MATRIX_AT(this->sample_maxmin,2,dim_n,1)); + // printf("%f ", MATRIX_AT(norm_sample,dim_signal,i_seq,dim_n)); } - // printf("\r\n"); + //printf("\r\n"); } - // 出力層の信号 - float* out_signal = new float[dim_signal]; - - // 入力層->中間層の信号和 - float* in_mid_net = new float[num_mid_neuron]; - // 中間層->出力層の信号和. - float* mid_out_net = new float[dim_signal]; - - // 誤差信号 - float* sigma = new float[dim_signal]; - // 前回の二乗誤差値:収束判定に用いる. float prevError; squareError = FLT_MAX; @@ -221,42 +228,49 @@ } else { // コンテキスト層 = 前回のコンテキスト層の出力 // 前回の中間層信号との線形和をシグモイド関数に通す. - for (int d = 0; d < num_mid_neuron; d++) { - expand_in_signal[dim_signal + d] = sigmoid_func(alpha_context * expand_in_signal[dim_signal + d] + expand_mid_signal[d]); + for (int d_in = 0; d_in < num_mid_neuron; d_in++) { + expand_in_signal[dim_signal + d_in] = sigmoid_func(alpha_context * expand_in_signal[dim_signal + d_in] + expand_mid_signal[d_in]); } } + + // printf("%d matrix calc start. \r\n", iteration); + // バイアス項は常に1に固定. 
expand_in_signal[dim_signal + num_mid_neuron] = 1; - + // printf(" in bias OK \r\n"); // 入力->中間層の出力信号和計算 multiply_mat_vec(Win_mid, expand_in_signal, in_mid_net, num_mid_neuron, dim_signal + num_mid_neuron + 1); + // printf(" in->mid OK \r\n"); // 中間層の出力信号計算 sigmoid_vec(in_mid_net, expand_mid_signal, num_mid_neuron); + // printf(" mid sigmoid OK \r\n"); expand_mid_signal[num_mid_neuron] = 1; + // printf(" mid bias OK \r\n"); // 中間->出力層の出力信号和計算 multiply_mat_vec(Wmid_out, expand_mid_signal, mid_out_net, dim_signal, num_mid_neuron + 1); + // printf(" mid->out OK \r\n"); // 出力層の出力信号計算 sigmoid_vec(mid_out_net, out_signal, dim_signal); - + // printf(" out sigmoid OK \r\n"); - for (int i = 0; i < dim_signal; i++) { - predict_signal[i] = expand_signal(out_signal[i], - MATRIX_AT(sample_maxmin,2,i,0), - MATRIX_AT(sample_maxmin,2,i,1)); + for (int i_dim = 0; i_dim < dim_signal; i_dim++) { + predict_signal[i_dim] = expand_signal(out_signal[i_dim], + MATRIX_AT(sample_maxmin,2,i_dim,0), + MATRIX_AT(sample_maxmin,2,i_dim,1)); } - // printf("predict : %f %f %f \r\n", predict_signal[0], predict_signal[1], predict_signal[2]); + // printf("%d predict : %f %f %f \r\n", iteration, predict_signal[0], predict_signal[1], predict_signal[2]); // print_mat(Wmid_out, row_mid_out, col_mid_out); @@ -281,7 +295,7 @@ predict_signal[i] = expand_signal(out_signal[i], MATRIX_AT(sample_maxmin,2,i,0), MATRIX_AT(sample_maxmin,2,i,1)); - //printf("%f ", predict_signal[i]); + // printf("%f ", predict_signal[i]); } break; } @@ -311,7 +325,7 @@ } // 中間->入力層の係数の変更量計算 - register float sum_sigma; + float sum_sigma; for (int i = 0; i < num_mid_neuron; i++) { // 誤差信号を逆向きに伝播させる. 
sum_sigma = 0; @@ -355,10 +369,15 @@ } - delete [] dWin_mid; delete [] dWmid_out; - delete [] prevdWin_mid; delete [] prevdWmid_out; - delete [] norm_sample; delete [] out_signal; - delete [] in_mid_net; delete [] mid_out_net; + delete [] dWin_mid; + delete [] dWmid_out; + delete [] prevdWin_mid; + delete [] prevdWmid_out; + delete [] norm_sample; + delete [] out_signal; + delete [] in_mid_net; + delete [] mid_out_net; + delete [] sigma; return squareError; }