Neural network trained with sine, square, and triangle waveforms.
Diff: neural_network3.cpp
Revision: 0:7ee700dd1955
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/neural_network3.cpp Tue Nov 08 00:54:37 2016 +0000 @@ -0,0 +1,308 @@ +// +// File: neural_network3.cpp +// +// Code generated for Simulink model 'neural_network3'. +// +// Model version : 1.11 +// Simulink Coder version : 8.10 (R2016a) 10-Feb-2016 +// C/C++ source code generated on : Wed Oct 05 11:40:49 2016 +// +// Target selection: ert.tlc +// Embedded hardware selection: ARM Compatible->ARM Cortex +// Code generation objectives: Unspecified +// Validation result: Not run +// +#include "neural_network3.h" +#include "neural_network3_private.h" + +// Block signals (auto storage) +B_neural_network3_T neural_network3_B; + +// Real-time model +RT_MODEL_neural_network3_T neural_network3_M_; +RT_MODEL_neural_network3_T *const neural_network3_M = &neural_network3_M_; +real_T rt_roundd_snf(real_T u) +{ + real_T y; + if (fabs(u) < 4.503599627370496E+15) { + if (u >= 0.5) { + y = floor(u + 0.5); + } else if (u > -0.5) { + y = u * 0.0; + } else { + y = ceil(u - 0.5); + } + } else { + y = u; + } + + return y; +} + +// Model step function +void neural_network3_custom(real_T arg_In1[200], real_T arg_Out1[2]) +{ + int32_T i; + real_T tmp; + real_T tmp_0; + real_T tmp_1; + real_T tmp_2; + real_T tmp_3; + real_T rtb_Addminy; + real_T rtb_Sum1; + + // DotProduct: '<S9>/Dot Product' + tmp_2 = 0.0; + + // DotProduct: '<S19>/Dot Product' + tmp_3 = 0.0; + + // DotProduct: '<S20>/Dot Product' + rtb_Sum1 = 0.0; + + // DotProduct: '<S21>/Dot Product' + neural_network3_B.d0 = 0.0; + + // DotProduct: '<S22>/Dot Product' + neural_network3_B.d1 = 0.0; + + // DotProduct: '<S23>/Dot Product' + neural_network3_B.d2 = 0.0; + + // DotProduct: '<S24>/Dot Product' + neural_network3_B.d3 = 0.0; + + // DotProduct: '<S25>/Dot Product' + neural_network3_B.d4 = 0.0; + + // DotProduct: '<S26>/Dot Product' + neural_network3_B.d5 = 0.0; + + // DotProduct: '<S10>/Dot Product' + neural_network3_B.d6 = 0.0; + + // DotProduct: '<S11>/Dot Product' + 
neural_network3_B.d7 = 0.0; + + // DotProduct: '<S12>/Dot Product' + neural_network3_B.d8 = 0.0; + + // DotProduct: '<S13>/Dot Product' + neural_network3_B.d9 = 0.0; + + // DotProduct: '<S14>/Dot Product' + neural_network3_B.d10 = 0.0; + + // DotProduct: '<S15>/Dot Product' + neural_network3_B.d11 = 0.0; + + // DotProduct: '<S16>/Dot Product' + tmp = 0.0; + + // DotProduct: '<S17>/Dot Product' + tmp_0 = 0.0; + + // DotProduct: '<S18>/Dot Product' + tmp_1 = 0.0; + for (i = 0; i < 200; i++) { + // Bias: '<S32>/Add min y' incorporates: + // Bias: '<S32>/Subtract min x' + // Gain: '<S32>/range y // range x' + // Inport: '<Root>/In1' + + rtb_Addminy = (arg_In1[i] + neural_network3_ConstP.Subtractminx_Bias[i]) * + neural_network3_ConstP.rangeyrangex_Gain[i] + -1.0; + + // DotProduct: '<S9>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(1,:)'' + + tmp_2 += neural_network3_ConstP.IW111_Value[i] * rtb_Addminy; + + // DotProduct: '<S19>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(2,:)'' + + tmp_3 += neural_network3_ConstP.IW112_Value[i] * rtb_Addminy; + + // DotProduct: '<S20>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(3,:)'' + + rtb_Sum1 += neural_network3_ConstP.IW113_Value[i] * rtb_Addminy; + + // DotProduct: '<S21>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(4,:)'' + + neural_network3_B.d0 += neural_network3_ConstP.IW114_Value[i] * rtb_Addminy; + + // DotProduct: '<S22>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(5,:)'' + + neural_network3_B.d1 += neural_network3_ConstP.IW115_Value[i] * rtb_Addminy; + + // DotProduct: '<S23>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(6,:)'' + + neural_network3_B.d2 += neural_network3_ConstP.IW116_Value[i] * rtb_Addminy; + + // DotProduct: '<S24>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(7,:)'' + + neural_network3_B.d3 += neural_network3_ConstP.IW117_Value[i] * rtb_Addminy; + + // DotProduct: '<S25>/Dot Product' incorporates: + // Constant: 
'<S7>/IW{1,1}(8,:)'' + + neural_network3_B.d4 += neural_network3_ConstP.IW118_Value[i] * rtb_Addminy; + + // DotProduct: '<S26>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(9,:)'' + + neural_network3_B.d5 += neural_network3_ConstP.IW119_Value[i] * rtb_Addminy; + + // DotProduct: '<S10>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(10,:)'' + + neural_network3_B.d6 += neural_network3_ConstP.IW1110_Value[i] * rtb_Addminy; + + // DotProduct: '<S11>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(11,:)'' + + neural_network3_B.d7 += neural_network3_ConstP.IW1111_Value[i] * rtb_Addminy; + + // DotProduct: '<S12>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(12,:)'' + + neural_network3_B.d8 += neural_network3_ConstP.IW1112_Value[i] * rtb_Addminy; + + // DotProduct: '<S13>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(13,:)'' + + neural_network3_B.d9 += neural_network3_ConstP.IW1113_Value[i] * rtb_Addminy; + + // DotProduct: '<S14>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(14,:)'' + + neural_network3_B.d10 += neural_network3_ConstP.IW1114_Value[i] * + rtb_Addminy; + + // DotProduct: '<S15>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(15,:)'' + + neural_network3_B.d11 += neural_network3_ConstP.IW1115_Value[i] * + rtb_Addminy; + + // DotProduct: '<S16>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(16,:)'' + + tmp += neural_network3_ConstP.IW1116_Value[i] * rtb_Addminy; + + // DotProduct: '<S17>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(17,:)'' + + tmp_0 += neural_network3_ConstP.IW1117_Value[i] * rtb_Addminy; + + // DotProduct: '<S18>/Dot Product' incorporates: + // Constant: '<S7>/IW{1,1}(18,:)'' + + tmp_1 += neural_network3_ConstP.IW1118_Value[i] * rtb_Addminy; + } + + // Sum: '<S2>/netsum' incorporates: + // DotProduct: '<S10>/Dot Product' + // DotProduct: '<S11>/Dot Product' + // DotProduct: '<S12>/Dot Product' + // DotProduct: '<S13>/Dot Product' + // DotProduct: '<S14>/Dot 
Product' + // DotProduct: '<S15>/Dot Product' + // DotProduct: '<S16>/Dot Product' + // DotProduct: '<S17>/Dot Product' + // DotProduct: '<S18>/Dot Product' + // DotProduct: '<S19>/Dot Product' + // DotProduct: '<S20>/Dot Product' + // DotProduct: '<S21>/Dot Product' + // DotProduct: '<S22>/Dot Product' + // DotProduct: '<S23>/Dot Product' + // DotProduct: '<S24>/Dot Product' + // DotProduct: '<S25>/Dot Product' + // DotProduct: '<S26>/Dot Product' + // DotProduct: '<S9>/Dot Product' + + neural_network3_B.dv0[0] = tmp_2; + neural_network3_B.dv0[1] = tmp_3; + neural_network3_B.dv0[2] = rtb_Sum1; + neural_network3_B.dv0[3] = neural_network3_B.d0; + neural_network3_B.dv0[4] = neural_network3_B.d1; + neural_network3_B.dv0[5] = neural_network3_B.d2; + neural_network3_B.dv0[6] = neural_network3_B.d3; + neural_network3_B.dv0[7] = neural_network3_B.d4; + neural_network3_B.dv0[8] = neural_network3_B.d5; + neural_network3_B.dv0[9] = neural_network3_B.d6; + neural_network3_B.dv0[10] = neural_network3_B.d7; + neural_network3_B.dv0[11] = neural_network3_B.d8; + neural_network3_B.dv0[12] = neural_network3_B.d9; + neural_network3_B.dv0[13] = neural_network3_B.d10; + neural_network3_B.dv0[14] = neural_network3_B.d11; + neural_network3_B.dv0[15] = tmp; + neural_network3_B.dv0[16] = tmp_0; + neural_network3_B.dv0[17] = tmp_1; + + // DotProduct: '<S30>/Dot Product' + tmp_2 = 0.0; + + // DotProduct: '<S31>/Dot Product' + tmp_3 = 0.0; + for (i = 0; i < 18; i++) { + // Sum: '<S8>/Sum1' incorporates: + // Constant: '<S2>/b{1}' + // Constant: '<S8>/one' + // Constant: '<S8>/one1' + // Gain: '<S8>/Gain' + // Gain: '<S8>/Gain1' + // Sum: '<S2>/netsum' + // Sum: '<S8>/Sum' + + rtb_Sum1 = 1.0 / (exp((neural_network3_B.dv0[i] + + neural_network3_ConstP.b1_Value[i]) * -2.0) + 1.0) * 2.0 - 1.0; + + // DotProduct: '<S30>/Dot Product' incorporates: + // Constant: '<S28>/IW{2,1}(1,:)'' + + tmp_2 += neural_network3_ConstP.IW211_Value[i] * rtb_Sum1; + + // DotProduct: '<S31>/Dot Product' 
incorporates: + // Constant: '<S28>/IW{2,1}(2,:)'' + + tmp_3 += neural_network3_ConstP.IW212_Value[i] * rtb_Sum1; + } + + // Outport: '<Root>/Out1' incorporates: + // Bias: '<S33>/Subtract min y' + // DotProduct: '<S30>/Dot Product' + // DotProduct: '<S31>/Dot Product' + // Gain: '<S33>/Divide by range y' + // Rounding: '<Root>/Rounding Function' + // Sum: '<S3>/netsum' + + arg_Out1[0] = rt_roundd_snf(fabs(((tmp_2 + 0.418253410631824) + 1.0) * 0.5)); + arg_Out1[1] = rt_roundd_snf(fabs(((tmp_3 + -0.64279410179815355) + 1.0) * 0.5)); +} + +// Model initialize function +void neural_network3_initialize(void) +{ + // Registration code + + // initialize error status + rtmSetErrorStatus(neural_network3_M, (NULL)); +} + +// Model terminate function +void neural_network3_terminate(void) +{ + // (no terminate code required) +} + +// +// File trailer for generated code. +// +// [EOF] +//