Generates neural-net instances through a genetic algorithm for use in task learning by the m3pi robot. Reward is determined either by detection of black areas on the arena floor or by the amount of area explored within a limited time. Due to memory constraints, only a limited number of nets can be kept.

Dependencies:   Ping m3pimaze mbed

Code/CNeuralNet.cpp

Committer:
parkbanks
Date:
2015-04-23
Revision:
0:ac93d9db8dbd

File content as of revision 0:ac93d9db8dbd:

#include "CNeuralNet.h"


// Neuron Methods
// Constructs a neuron with NumInputs connection weights plus one extra
// weight for the bias term; every weight starts at a random value.
SNeuron::SNeuron(int NumInputs): m_NumInputs(NumInputs+1)
{
    //m_NumInputs already accounts for the bias weight (NumInputs+1)
    for (int w = 0; w < m_NumInputs; ++w)
    {
        //seed each weight with an initial random value
        m_vecWeight.push_back(RandomClamped());
    }
}


// Neuron Layer Methods
// Builds a layer of NumNeurons neurons, each wired to NumInputsPerNeuron
// inputs; SNeuron's constructor randomises each neuron's weights.
SNeuronLayer::SNeuronLayer(int NumNeurons, int NumInputsPerNeuron): m_NumNeurons(NumNeurons)
{
    for (int n = 0; n < NumNeurons; ++n)
    {
        m_vecNeurons.push_back(SNeuron(NumInputsPerNeuron));
    }
}


// CNeuralNet Methods
// Constructs an ANN with a fixed topology: 2 inputs, 2 outputs and no
// hidden layers, then builds the layers with random weights.
// TODO(review): take these values as parameters from main instead of
// hard-coding them here.
CNeuralNet::CNeuralNet()
{
    //network topology (hard-coded for the m3pi task)
    m_NumInputs           = 2;
    m_NumOutputs          = 2;
    m_NumHiddenLayers     = 0;
    m_NeuronsPerHiddenLyr = 0;

    //allocate the layers and their randomly-weighted neurons
    CreateNet();
}

// Builds ANN with random weights from -1 to 1.
// With no hidden layers the net is a single input->output layer; otherwise
// it is input->hidden, (m_NumHiddenLayers-1) hidden->hidden layers, then
// hidden->output.
void CNeuralNet::CreateNet()
{
    if (m_NumHiddenLayers <= 0)
    {
        //degenerate case: the output layer reads the inputs directly
        m_vecLayers.push_back(SNeuronLayer(m_NumOutputs, m_NumInputs));
        return;
    }

    //first hidden layer is fed by the network inputs
    m_vecLayers.push_back(SNeuronLayer(m_NeuronsPerHiddenLyr, m_NumInputs));

    //each remaining hidden layer is fed by the previous hidden layer
    for (int layer = 1; layer < m_NumHiddenLayers; ++layer)
    {
        m_vecLayers.push_back(SNeuronLayer(m_NeuronsPerHiddenLyr, m_NeuronsPerHiddenLyr));
    }

    //output layer is fed by the last hidden layer
    m_vecLayers.push_back(SNeuronLayer(m_NumOutputs, m_NeuronsPerHiddenLyr));
}

//  returns vector of weights
// Flattens every neuron's weight list (layer by layer, neuron by neuron,
// bias weight included) into one vector, in the same order that
// PutWeights expects it back.
vector<float> CNeuralNet::GetWeights() const
{
    vector<float> weights;

    for (int layer = 0; layer < m_NumHiddenLayers + 1; ++layer)
    {
        for (int n = 0; n < m_vecLayers[layer].m_NumNeurons; ++n)
        {
            const SNeuron &neuron = m_vecLayers[layer].m_vecNeurons[n];
            //append this neuron's full weight list (m_NumInputs entries,
            //bias weight last) in declaration order
            weights.insert(weights.end(),
                           neuron.m_vecWeight.begin(),
                           neuron.m_vecWeight.end());
        }
    }
    return weights;
}

// replaces weights with values given in float vector
// Writes the flat weight vector back into the net in the same order that
// GetWeights produces; the caller must supply GetNumberOfWeights() values.
void CNeuralNet::PutWeights(vector<float> &weights)
{
    int next = 0;

    for (int layer = 0; layer < m_NumHiddenLayers + 1; ++layer)
    {
        for (int n = 0; n < m_vecLayers[layer].m_NumNeurons; ++n)
        {
            SNeuron &neuron = m_vecLayers[layer].m_vecNeurons[n];
            for (int w = 0; w < neuron.m_NumInputs; ++w)
            {
                neuron.m_vecWeight[w] = weights[next];
                ++next;
            }
        }
    }
}

// returns # of weights needed for net
// Counts every connection weight in the net (bias weights included), i.e.
// the length of the vector GetWeights returns and PutWeights consumes.
int CNeuralNet::GetNumberOfWeights() const
{
    int weights = 0;
    for (int i=0; i<m_NumHiddenLayers + 1; ++i){
        for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j){
            //each neuron contributes m_NumInputs weights (inputs + bias);
            //sum directly instead of incrementing once per weight in a
            //pointless innermost loop
            weights += m_vecLayers[i].m_vecNeurons[j].m_NumInputs;
        }
    }
    return weights;
}

// Calculates output values from inputs
// Feeds 'inputs' forward through every layer: each neuron sums
// weight*input over its inputs, adds the bias weight (bias input is -1),
// and passes the total through the sigmoid (activation response 1).
// Returns the output layer's activations.
//
// NOTE(review): the caller must supply at least m_NumInputs values;
// fewer would read past the end of the vector — no size check is done,
// matching the original behaviour.
vector<float> CNeuralNet::Update(vector<float> &inputs)
{
    //work on a local copy so the caller's vector is never overwritten.
    //(the old code assigned each layer's outputs back into 'inputs',
    //clobbering the caller's data whenever hidden layers exist)
    vector<float> layerInputs = inputs;

    //stores the resultant outputs from each layer
    vector<float> outputs;

    //layer by layer
    for (int i=0; i<m_NumHiddenLayers + 1; ++i){
        if ( i > 0 ){
            //outputs of the previous layer feed this layer
            layerInputs = outputs;
        }
        outputs.clear();

        //for each neuron sum inputs*weights and feed through the sigmoid
        for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j){
            float netinput = 0;
            int NumInputs = m_vecLayers[i].m_vecNeurons[j].m_NumInputs;

            //sum weights*inputs (last weight is reserved for the bias)
            for (int k=0; k<NumInputs - 1; ++k){
                netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k] * layerInputs[k];
            }

            //add in bias
            netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[NumInputs-1] * -1; //bias is -1

            //feed the combined activation through the sigmoid
            outputs.push_back(Sigmoid(netinput, 1));//activation response of 1
        }
    }
    return outputs;
}

// Sigmoid function
// Standard logistic activation: maps netinput into (0,1). 'response'
// scales the steepness of the curve (1 = standard sigmoid).
float CNeuralNet::Sigmoid(float netinput, float response){
    float scaled = -netinput / response;
    return ( 1 / ( 1 + exp(scaled)));
}