rakha asyrofi / ANN_Coba
machine learning ANN (needs help)
Revision 0:482cc6c25690, committed 2018-04-14
- Committer: asyrofi
- Date: Sat Apr 14 04:32:05 2018 +0000
- Commit message: needs help
Changed in this revision
main.cpp
mbed.bld
diff -r 000000000000 -r 482cc6c25690 main.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/main.cpp	Sat Apr 14 04:32:05 2018 +0000

#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <mbed.h>

typedef unsigned char byte;   // Arduino-style byte type; not provided by mbed

Serial pc(USBTX, USBRX);

const int PatternCount = 10;
const int InputNodes = 7;
const int HiddenNodes = 8;
const int OutputNodes = 4;
const float LearningRate = 0.3;
const float Momentum = 0.9;
const float InitialWeightMax = 0.5;
const float Success = 0.0004;

// Seven-segment patterns for the digits 0-9
const byte Input[PatternCount][InputNodes] = {
    { 1, 1, 1, 1, 1, 1, 0 },  // 0
    { 0, 1, 1, 0, 0, 0, 0 },  // 1
    { 1, 1, 0, 1, 1, 0, 1 },  // 2
    { 1, 1, 1, 1, 0, 0, 1 },  // 3
    { 0, 1, 1, 0, 0, 1, 1 },  // 4
    { 1, 0, 1, 1, 0, 1, 1 },  // 5
    { 0, 0, 1, 1, 1, 1, 1 },  // 6
    { 1, 1, 1, 0, 0, 0, 0 },  // 7
    { 1, 1, 1, 1, 1, 1, 1 },  // 8
    { 1, 1, 1, 0, 0, 1, 1 }   // 9
};

// 4-bit binary encoding of each digit
const byte Target[PatternCount][OutputNodes] = {
    { 0, 0, 0, 0 },
    { 0, 0, 0, 1 },
    { 0, 0, 1, 0 },
    { 0, 0, 1, 1 },
    { 0, 1, 0, 0 },
    { 0, 1, 0, 1 },
    { 0, 1, 1, 0 },
    { 0, 1, 1, 1 },
    { 1, 0, 0, 0 },
    { 1, 0, 0, 1 }
};

int i, j, p, q, r;
int ReportEvery1000;
int RandomizedIndex[PatternCount];
long TrainingCycle;
float Rando;
float Error;
float Accum;

float Hidden[HiddenNodes];
float Output[OutputNodes];
float HiddenWeights[InputNodes+1][HiddenNodes];       // last row holds the hidden-layer bias
float OutputWeights[HiddenNodes+1][OutputNodes];      // last row holds the output-layer bias
float HiddenDelta[HiddenNodes];
float OutputDelta[OutputNodes];
float ChangeHiddenWeights[InputNodes+1][HiddenNodes];
float ChangeOutputWeights[HiddenNodes+1][OutputNodes];

void toTerminal();

int main()
{
    pc.baud(9600);
    // Seed the random generator (the Arduino original used randomSeed(analogRead(3)),
    // which has no direct mbed equivalent)
    srand((unsigned) time(NULL));
    ReportEvery1000 = 1;
    for( p = 0 ; p < PatternCount ; p++ ) {
        RandomizedIndex[p] = p;
    }

    while (1) {   // the original declared a nested "void loop()" here, which is not valid C++

        /******************************************************************
        * Initialize HiddenWeights and ChangeHiddenWeights
        ******************************************************************/
        for( i = 0 ; i < HiddenNodes ; i++ ) {
            for( j = 0 ; j <= InputNodes ; j++ ) {
                ChangeHiddenWeights[j][i] = 0.0;
                Rando = float(rand() % 100) / 100;
                HiddenWeights[j][i] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax;
            }
        }

        /******************************************************************
        * Initialize OutputWeights and ChangeOutputWeights
        ******************************************************************/
        for( i = 0 ; i < OutputNodes ; i++ ) {
            for( j = 0 ; j <= HiddenNodes ; j++ ) {
                ChangeOutputWeights[j][i] = 0.0;
                Rando = float(rand() % 100) / 100;
                OutputWeights[j][i] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax;
            }
        }
        pc.printf("Initial/Untrained Outputs: \n");
        toTerminal();

        /******************************************************************
        * Begin training
        ******************************************************************/
        for( TrainingCycle = 1 ; TrainingCycle < 2147483647 ; TrainingCycle++ ) {

            // Randomize the order of the training patterns
            for( p = 0 ; p < PatternCount ; p++ ) {
                q = rand() % PatternCount;
                r = RandomizedIndex[p];
                RandomizedIndex[p] = RandomizedIndex[q];
                RandomizedIndex[q] = r;
            }
            Error = 0.0;

            // Cycle through each training pattern in the randomized order
            for( q = 0 ; q < PatternCount ; q++ ) {
                p = RandomizedIndex[q];

                // Compute hidden layer activations
                for( i = 0 ; i < HiddenNodes ; i++ ) {
                    Accum = HiddenWeights[InputNodes][i];
                    for( j = 0 ; j < InputNodes ; j++ ) {
                        Accum += Input[p][j] * HiddenWeights[j][i];
                    }
                    Hidden[i] = 1.0/(1.0 + exp(-Accum));
                }

                // Compute output layer activations and accumulate the error
                for( i = 0 ; i < OutputNodes ; i++ ) {
                    Accum = OutputWeights[HiddenNodes][i];
                    for( j = 0 ; j < HiddenNodes ; j++ ) {
                        Accum += Hidden[j] * OutputWeights[j][i];
                    }
                    Output[i] = 1.0/(1.0 + exp(-Accum));
                    OutputDelta[i] = (Target[p][i] - Output[i]) * Output[i] * (1.0 - Output[i]);
                    Error += 0.5 * (Target[p][i] - Output[i]) * (Target[p][i] - Output[i]);
                }

                // Back-propagate the errors to the hidden layer
                for( i = 0 ; i < HiddenNodes ; i++ ) {
                    Accum = 0.0;
                    for( j = 0 ; j < OutputNodes ; j++ ) {
                        Accum += OutputWeights[i][j] * OutputDelta[j];
                    }
                    HiddenDelta[i] = Accum * Hidden[i] * (1.0 - Hidden[i]);
                }

                // Update the input-to-hidden weights (with momentum)
                for( i = 0 ; i < HiddenNodes ; i++ ) {
                    ChangeHiddenWeights[InputNodes][i] = LearningRate * HiddenDelta[i] + Momentum * ChangeHiddenWeights[InputNodes][i];
                    HiddenWeights[InputNodes][i] += ChangeHiddenWeights[InputNodes][i];
                    for( j = 0 ; j < InputNodes ; j++ ) {
                        ChangeHiddenWeights[j][i] = LearningRate * Input[p][j] * HiddenDelta[i] + Momentum * ChangeHiddenWeights[j][i];
                        HiddenWeights[j][i] += ChangeHiddenWeights[j][i];
                    }
                }

                // Update the hidden-to-output weights (with momentum)
                for( i = 0 ; i < OutputNodes ; i++ ) {
                    ChangeOutputWeights[HiddenNodes][i] = LearningRate * OutputDelta[i] + Momentum * ChangeOutputWeights[HiddenNodes][i];
                    OutputWeights[HiddenNodes][i] += ChangeOutputWeights[HiddenNodes][i];
                    for( j = 0 ; j < HiddenNodes ; j++ ) {
                        ChangeOutputWeights[j][i] = LearningRate * Hidden[j] * OutputDelta[i] + Momentum * ChangeOutputWeights[j][i];
                        OutputWeights[j][i] += ChangeOutputWeights[j][i];
                    }
                }
            }

            // Every 1000 cycles send progress to the terminal
            ReportEvery1000 = ReportEvery1000 - 1;
            if (ReportEvery1000 == 0) {
                pc.printf("TrainingCycle: %ld", TrainingCycle);
                pc.printf(" Error = %.5f\n", Error);

                toTerminal();

                if (TrainingCycle == 1) {
                    ReportEvery1000 = 999;
                } else {
                    ReportEvery1000 = 1000;
                }
            }

            // Stop once the error drops below the Success threshold
            if( Error < Success ) break;
        }
        pc.printf("TrainingCycle: %ld", TrainingCycle);
        pc.printf(" Error = %.5f\n", Error);

        toTerminal();

        pc.printf("Training Set Solved!\n");
        pc.printf("--------\n");
        ReportEvery1000 = 1;
    }
}

void toTerminal()
{
    for( p = 0 ; p < PatternCount ; p++ ) {
        pc.printf(" Training Pattern: %i\n", p);
        pc.printf(" Input ");
        for( i = 0 ; i < InputNodes ; i++ ) {
            pc.printf("%i ", Input[p][i]);
        }
        pc.printf(" Target ");
        for( i = 0 ; i < OutputNodes ; i++ ) {
            pc.printf("%i ", Target[p][i]);   // the original printed Input[p][i] here by mistake
        }

        /******************************************************************
        * Compute hidden layer activations
        ******************************************************************/
        for( i = 0 ; i < HiddenNodes ; i++ ) {
            Accum = HiddenWeights[InputNodes][i];
            for( j = 0 ; j < InputNodes ; j++ ) {
                Accum += Input[p][j] * HiddenWeights[j][i];
            }
            Hidden[i] = 1.0/(1.0 + exp(-Accum));
        }

        /******************************************************************
        * Compute output layer activations
        ******************************************************************/
        for( i = 0 ; i < OutputNodes ; i++ ) {
            Accum = OutputWeights[HiddenNodes][i];
            for( j = 0 ; j < HiddenNodes ; j++ ) {
                Accum += Hidden[j] * OutputWeights[j][i];
            }
            Output[i] = 1.0/(1.0 + exp(-Accum));
        }
        pc.printf(" Output ");
        for( i = 0 ; i < OutputNodes ; i++ ) {
            pc.printf("%.5f ", Output[i]);
        }
        pc.printf("\n");
    }
}
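For reference, the rules the training loop above implements can be written compactly as follows. This is just a restatement of the code, with the symbol names chosen here: sigma is the logistic sigmoid, x_j an input, t_k a target, eta the LearningRate, mu the Momentum, and the bias stored as the extra last row of each weight matrix.

\[ H_i = \sigma\Big(W^{h}_{\text{bias},i} + \sum_{j} x_j\, W^{h}_{ji}\Big), \qquad O_k = \sigma\Big(W^{o}_{\text{bias},k} + \sum_{i} H_i\, W^{o}_{ik}\Big), \qquad \sigma(a) = \frac{1}{1 + e^{-a}} \]

\[ \delta^{o}_k = (t_k - O_k)\, O_k (1 - O_k), \qquad \delta^{h}_i = H_i (1 - H_i) \sum_{k} W^{o}_{ik}\, \delta^{o}_k \]

\[ \Delta W_{ji}(n) = \eta\, x_j\, \delta_i + \mu\, \Delta W_{ji}(n-1), \qquad W_{ji} \leftarrow W_{ji} + \Delta W_{ji}(n), \qquad E = \tfrac{1}{2}\sum_{p}\sum_{k}\big(t_{pk} - O_{pk}\big)^2 \]

with eta = 0.3 and mu = 0.9; training stops once E over all ten patterns falls below Success = 0.0004.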
diff -r 000000000000 -r 482cc6c25690 mbed.bld
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mbed.bld	Sat Apr 14 04:32:05 2018 +0000
@@ -0,0 +1,1 @@
+http://mbed.org/users/mbed_official/code/mbed/builds/4f6c30876dfa
\ No newline at end of file
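As a debugging aid, not part of this revision, a minimal host-side sketch of the same forward pass can be compiled on a PC to check the arithmetic without the board. The helper names below (sigmoidf, forward) are hypothetical; the sketch assumes the same 7-8-4 layout as main.cpp, with the bias stored in the last row of each weight matrix.

// forward_check.cpp -- hypothetical host-side sketch, not part of this revision.
// Runs one forward pass of the same 7-8-4 network on a PC. With the global
// weight arrays left zero-initialized, every output is sigmoid(0) = 0.5, so a
// trained run should be pasted into the arrays before comparing real values.
#include <math.h>
#include <stdio.h>

const int InputNodes  = 7;
const int HiddenNodes = 8;
const int OutputNodes = 4;

float HiddenWeights[InputNodes + 1][HiddenNodes];   // fill from a training run
float OutputWeights[HiddenNodes + 1][OutputNodes];  // fill from a training run

float sigmoidf(float a) { return 1.0f / (1.0f + expf(-a)); }

// Compute the network output for one 7-segment input pattern.
void forward(const unsigned char in[InputNodes], float out[OutputNodes])
{
    float hidden[HiddenNodes];
    for (int i = 0; i < HiddenNodes; i++) {
        float acc = HiddenWeights[InputNodes][i];       // bias term (last row)
        for (int j = 0; j < InputNodes; j++)
            acc += in[j] * HiddenWeights[j][i];
        hidden[i] = sigmoidf(acc);
    }
    for (int k = 0; k < OutputNodes; k++) {
        float acc = OutputWeights[HiddenNodes][k];      // bias term (last row)
        for (int i = 0; i < HiddenNodes; i++)
            acc += hidden[i] * OutputWeights[i][k];
        out[k] = sigmoidf(acc);
    }
}

int main()
{
    const unsigned char eight[InputNodes] = { 1, 1, 1, 1, 1, 1, 1 };  // segments for digit 8
    float out[OutputNodes];
    forward(eight, out);
    for (int k = 0; k < OutputNodes; k++)
        printf("%.5f ", out[k]);    // expected to approach 1 0 0 0 after training
    printf("\n");
    return 0;
}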