Click here to Skip to main content
Click here to Skip to main content
Add your own
alternative version

AI: Neural Network for Beginners (Part 3 of 3)

, 29 Jan 2007 CPOL
AI: An introduction to neural networks (multi-layer networks trained by a microbial GA).
using System;
using System.Collections.Generic;
using System.Text;

namespace GA_ANN_XOR
{
    #region NN_Trainer_XOR CLASS
    /// <summary>
    /// Provides a GA trainer for a
    /// <see cref="NeuralNetwork">NeuralNetwork</see> class
    /// with 2 inputs, 2 hidden, and 1 output, which is trying
    /// to approximate the XOR problem.
    /// Training uses a microbial GA: two population members are picked at
    /// random, the one with the larger overall error (the loser) has some of
    /// its weights overwritten by the winner's and is then mutated, while the
    /// winner survives into the next cycle unchanged.
    /// </summary>
    public class GA_Trainer_XOR
    {
        #region Instance fields
        //fixed seed (5), so every run of the trainer is repeatable
        private Random gen = new Random(5);
        //default number of breeding cycles (NOTE(review): doTraining's
        //parameter of the same name shadows this field, so it is unused there)
        private int training_times = 10000;
        //the 4 input patterns of the XOR truth table; the matching target for
        //each row is derived on demand by getTargetValues
        private double[,] train_set =
            {{0, 0},
             {0, 1},
             {1,0},
             {1,1}};

        //population size
        private int POPULATION = 15;
        //the population of candidate ANN's
        private NeuralNetwork[] networks;
        //probability that any single loser weight receives a mutation
        private double MUTATION = 0.5;
        //probability that any single loser weight is copied from the winner
        private double RECOMBINE = 0.4;
        //flag to detect when we have found a good ANN
        private bool foundGoodANN = false;
        //current training loop index (reported to the gui via GAChange events)
        private int trainLoop = 0;
        //index into networks of the best configuration found;
        //-1 until evaluate() finds one below acceptableNNError
        private int bestConfiguration = -1;
        //acceptable overall NeuralNetwork error; training stops early once a
        //member's error summed over the whole training set falls below this
        private double acceptableNNError = 0.1;
        //events for gui, generated by the GA trainer
        public delegate void GAChangeHandler(Object sender, TrainerEventArgs te);
        public event GAChangeHandler GAChange;
        public event EventHandler GATrainingDone;
        //events for gui, generated by the NeuralNetwork, but propagated up to
        //the gui by the GA trainer. The gui knows nothing about the array of
        //NeuralNetworks, so the event must come through the trainer
        public delegate void ChangeHandler(Object sender, NeuralNetworkEventArgs nne);
        public event ChangeHandler NNChange;

        #endregion
        #region Public Properties/Methods


        /// <summary>
        /// Performs a microbial GA (best of last breeding cycle stays in population)
        /// on an array of <see cref="NeuralNetwork"> NeuralNetworks</see> in an attempt
        /// to find a solution to the XOR logic problem. The training presents the entire
        /// training set to a random pair of <see cref="NeuralNetwork"> NeuralNetworks,</see>
        /// and evaluates which one does best. The winner's genes, plus some mutation, are
        /// used to reshape the loser's genes, in the hope that the new population moves
        /// towards a closer solution.
        /// </summary>
        /// <param name="training_times">the number of times to carry out the
        /// training loop (shadows the instance field of the same name)</param>
        /// <returns>The best <see cref="NeuralNetwork"> NeuralNetwork </see>
        /// configuration found; if no member ever beat acceptableNNError, the
        /// winner of the final breeding cycle is returned instead</returns>
        public NeuralNetwork doTraining(int training_times)
        {


            int a = 0;
            int b = 0;
            int WINNER = 0;
            int LOSER = 0;

            #region Training
            //loop for the trainingPeriod
            for (trainLoop = 0; trainLoop < training_times; trainLoop++)
            {
                //fire training loop event so the gui can show progress
                TrainerEventArgs te = new TrainerEventArgs(trainLoop);
                On_GAChange(te);
                NeuralNetwork.isInTraining = true;

                //if the previous evaluation cycle found a good ANN configuration
                //quit the training cycle, otherwise, let the breeding continue
                if (foundGoodANN)
                {
                    break;
                }

                //pick 2 ANN's at random, GA - SELECTION
                //NOTE(review): a and b may be equal, in which case a member is
                //recombined/mutated against itself for this cycle
                a = (int)(gen.NextDouble() * POPULATION);
                b = (int)(gen.NextDouble() * POPULATION);

                //work out which was the WINNER and LOSER, GA - EVALUATION
                //(lower summed error over the training set wins)
                if (evaluate(a) < evaluate(b))
                {
                    WINNER = a;
                    LOSER = b;
                }
                else
                {
                    WINNER = b;
                    LOSER = a;
                }

                //get the current values of both members' weight matrices
                double[,] WINNER_i_to_h_wts = networks[WINNER].InputToHiddenWeights;
                double[,] LOSER_i_to_h_wts = networks[LOSER].InputToHiddenWeights;
                double[,] WINNER_h_to_o_wts = networks[WINNER].HiddenToOutputWeights;
                double[,] LOSER_h_to_o_wts = networks[LOSER].HiddenToOutputWeights;

                //i_to_h_wts RECOMBINATION LOOP
                //(the +1 presumably accounts for a bias weight row - TODO confirm
                //against the NeuralNetwork class)
                for (int k = 0; k < networks[WINNER].NumberOfInputs + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfHidden; l++)
                    {
                        //copy genes from winner randomly for i_to_h_wts weights
                        if (gen.NextDouble() < RECOMBINE)
                        {
                            //overwrite the loser's weight with the winner's
                            LOSER_i_to_h_wts[k,l] = WINNER_i_to_h_wts[k,l];
                        }
                    }
                }

                //h_to_o_wts RECOMBINATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfHidden + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfOutputs; l++)
                    {
                        //copy genes from winner randomly for h_to_o_wts weights
                        if (gen.NextDouble() < RECOMBINE)
                        {
                            //overwrite the loser's weight with the winner's
                            LOSER_h_to_o_wts[k,l] = WINNER_h_to_o_wts[k,l];
                        }
                    }
                }

                //i_to_h_wts MUTATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfInputs + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfHidden; l++)
                    {
                        //add some mutation randomly, a uniform nudge in [-0.1, +0.1)
                        if (gen.NextDouble() < MUTATION)
                        {
                            LOSER_i_to_h_wts[k,l] += ((gen.NextDouble() * 0.2) - 0.1);
                        }
                    }
                }

                //h_to_o_wts MUTATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfHidden + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfOutputs; l++)
                    {
                        //add some mutation randomly, a uniform nudge in [-0.1, +0.1)
                        if (gen.NextDouble() < MUTATION)
                        {
                            LOSER_h_to_o_wts[k,l] += ((gen.NextDouble() * 0.2) - 0.1);
                        }
                    }
                }

                //update the loser's i_to_h_wts genotype
                networks[LOSER].InputToHiddenWeights = LOSER_i_to_h_wts;
                //update the loser's h_to_o_wts genotype
                networks[LOSER].HiddenToOutputWeights = LOSER_h_to_o_wts;

            }
            #endregion


            //AT THIS POINT ITS EITHER THE END OF TRAINING OR WE HAVE
            //FOUND AN ACCEPTABLE ANN, WHICH IS BELOW THE VALUE


            //tell gui that training is now done
            On_GATrainingDone(new EventArgs());
            NeuralNetwork.isInTraining = false;

            //check to see if there was a best configuration found, may not have done
            //enough training to find a good NeuralNetwork configuration, so will simply
            //have to return the WINNER of the final breeding cycle
            if (bestConfiguration == -1)
            {
                bestConfiguration = WINNER;
            }
            //return the best Neural network
            return networks[bestConfiguration];

        }



        /// <summary>
        /// Is called after the initial training is completed.
        /// Simply presents 1 complete pass of the training set to
        /// the trained network, which should hopefully get it pretty
        /// correct now it's trained.
        /// NOTE(review): the results are not consumed here; presumably
        /// pass_forward raises the NeuralNetwork Change event so the gui
        /// can display the outputs - confirm against the NeuralNetwork class.
        /// </summary>
        public void doActualRun()
        {
            //loop through the entire training set
            for (int i = 0; i <= train_set.GetUpperBound(0); i++)
            {
                //forward these new values through network
                //forward weights through ANN
                forwardWeights(bestConfiguration, getTrainSet(i));
                double[] targetValues = getTargetValues(getTrainSet(i));
            }
        }


        #endregion
        #region Constructor
        /// <summary>
        /// Constructs a new GA_Trainer_XOR. The constructor creates
        /// the population of <see cref="NeuralNetwork">NeuralNetworks</see>
        /// (each with 2 inputs, 2 hidden nodes and 1 output, random initial
        /// weights) and also wires up the underlying <see cref="NeuralNetwork">
        /// NeuralNetworks</see> events, to a new GA event, such that the
        /// <see cref="NeuralNetwork">NeuralNetworks</see> event can be
        /// propagated to the gui
        /// </summary>
        public GA_Trainer_XOR()
        {
            networks = new NeuralNetwork[POPULATION];

            //create new ANN objects, random weights applied at start
            for (int i = 0; i <= networks.GetUpperBound(0); i++)
            {
                networks[i] = new NeuralNetwork(2, 2, 1);
                networks[i].Change += new NeuralNetwork.ChangeHandler(GA_Trainer_NN_Change);
            }
        }

        #endregion
        #region Events




        /// <summary>
        /// Raises the GA TrainingDone event
        /// </summary>
        /// <param name="ea">The EventArgs</param>
        public virtual void On_GATrainingDone(EventArgs ea)
        {
            if (GATrainingDone != null)
            {
                // Invokes the delegates. 
                GATrainingDone(this, ea);
            }
        }


        /// <summary>
        /// Raises the GA Change event
        /// </summary>
        /// <param name="te">The TrainerEventArgs</param>
        public virtual void On_GAChange(TrainerEventArgs te)
        {
            if (GAChange != null)
            {
                // Invokes the delegates. 
                GAChange(this, te);
            }
        }

        /// <summary>
        /// Raises the NeuralNetwork Change event, simply propagates the
        /// original <see cref="NeuralNetwork">NeuralNetwork</see> 
        /// event up to the GUI
        /// </summary>
        /// <param name="nne">The NeuralNetworkEventArgs</param>
        public virtual void On_NNChange(NeuralNetworkEventArgs nne)
        {
            if (NNChange != null)
            {
                // Invokes the delegates. 
                NNChange(this, nne);
            }
        }
        #endregion
        #region Private Methods

        /// <summary>
        /// Evaluates a member of the population (of <see cref="NeuralNetwork">
        /// NeuralNetworks</see>). As a side effect, if the member's error beats
        /// acceptableNNError, records it as bestConfiguration and sets the
        /// foundGoodANN flag so doTraining stops at the start of its next cycle.
        /// </summary>
        /// <param name="popMember">The member of the population to evaluate</param>
        /// <returns>An overall error value for this population member, which is
        /// the result of applying the complete training set to the population
        /// member, with its current weight configuration</returns>
        private double evaluate(int popMember)
        {

            double error = 0.0;

            //loop through the entire training set
            for (int i = 0; i <= train_set.GetUpperBound(0); i++)
            {
                //forward these new values through network
                //forward weights through ANN
                forwardWeights(popMember, getTrainSet(i));
                double[] targetValues = getTargetValues(getTrainSet(i));
                error += networks[popMember].getError(targetValues);

            }
            //if the Error term is < acceptableNNError value we have found
            //a good configuration of weights for the NeuralNetwork, so tell
            //the GA to stop looking
            if (error < acceptableNNError)
            {
                bestConfiguration = popMember;
                foundGoodANN = true;
            }

            //return error
            return error;
        }


        /// <summary>
        /// This handler is simply here to propagate the underlying 
        /// <see cref="NeuralNetwork">NeuralNetworks</see> Change
        /// event, to the gui. The gui has no visibility of the 
        /// array of <see cref="NeuralNetwork">NeuralNetworks</see>
        /// so this trainer class propagates the events from the
        /// <see cref="NeuralNetwork">NeuralNetworks</see> to the gui
        /// </summary>
        /// <param name="sender">The original <see cref="NeuralNetwork">NeuralNetwork</see>
        /// that changed</param>
        /// <param name="nne">The NeuralNetworkEventArgs</param>
        private void GA_Trainer_NN_Change(object sender, NeuralNetworkEventArgs nne)
        {
            On_NNChange(nne);
        }

        /// <summary>
        /// Returns the row of the 2D train_set array at the index
        /// specified by the idx input parameter, as a new 1D array
        /// </summary>
        /// <param name="idx">The row index into the 2D array to get</param>
        /// <returns>The row of the 2D train_set array at the index
        /// specified by the idx input parameter</returns>
        private double[] getTrainSet(int idx)
        {
            //NOTE :
            //
            //A rectangular double[,] has no single-call way to extract a row
            //as a double[] (that would need a jagged double[][]), so the two
            //cells of the requested row are copied out by hand here

            double[] trainValues = { train_set[idx, 0], train_set[idx, 1] };
            return trainValues;
        }


        /// <summary>
        /// Forwards the weights from the input->hidden and also from
        /// the hidden->output nodes, for the trainingSet
        /// </summary>
        /// <param name="popMember">The population member</param>
        /// <param name="trainingSet">The training set to present to the 
        /// <see cref="NeuralNetwork"/>NeuralNetwork</param>
        private void forwardWeights(int popMember, double[] trainingSet)
        {
            //forward weights through ANN
            networks[popMember].pass_forward(trainingSet,getTargetValues(trainingSet));
        }

        /// <summary>
        /// Returns a single-element array which represents the target
        /// output for the current set of inputs.
        /// In the cases where the summed inputs = 1, then the target
        /// should be 1.0, otherwise it should be 0.0. 
        /// This encodes the XOR truth table for 2 binary inputs, which is
        /// fine since this trainer exists only for the XOR problem.
        /// </summary>
        /// <param name="currSet">The current set of inputs</param>
        /// <returns>A single-element array which represents the target
        /// output for the current set of inputs</returns>
        private double[] getTargetValues(double[] currSet)
        {
            //the summed value of the current training-set row
            double valOfSet = 0;
            double[] targs = new double[1];
            for (int i = 0; i < currSet.Length; i++)
            {
                valOfSet += currSet[i];
            }
            //in the cases where the summed inputs = 1, then target
            //should be 1.0, otherwise it should be 0.0
            targs[0] = valOfSet == 1 ? 1.0 : 0.0;
            return targs;
        }
        #endregion
    }
    #endregion
    #region TrainerEventArgs CLASS
    /// <summary>
    /// Provides the event arguments for the 
    /// <see cref="GA_Trainer_XOR">trainer</see> class.
    /// Carries the index of the current training loop so subscribers
    /// (e.g. the gui) can report training progress.
    /// </summary>
    public class TrainerEventArgs : EventArgs
    {
        #region Instance Fields
        //the training loop this event describes; assigned only in the
        //constructor, so marked readonly to make the immutability explicit
        private readonly int trainLoop;

        #endregion
        #region Public Constructor

        /// <summary>
        /// Constructs a new TrainerEventArgs object using the parameters provided
        /// </summary>
        /// <param name="trainLoop">The current training loop</param>
        public TrainerEventArgs(int trainLoop)
        {
            this.trainLoop = trainLoop;
        }
        #endregion
        #region Public Methods/Properties

        /// <summary>
        /// Gets the training loop number this event was raised for
        /// </summary>
        public int TrainingLoop
        {
            get { return trainLoop; }
        }
        #endregion

    }
    #endregion
}

By viewing downloads associated with this article you agree to the Terms of Service and the article's licence.

If a file you wish to view isn't highlighted, and is a text file (not binary), please let us know and we'll add colourisation support for it.

License

This article, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)

Share

About the Author

Sacha Barber
Software Developer (Senior)
United Kingdom United Kingdom
I currently hold the following qualifications (amongst others, I also studied Music Technology and Electronics, for my sins)
 
- MSc (Passed with distinctions), in Information Technology for E-Commerce
- BSc Hons (1st class) in Computer Science & Artificial Intelligence
 
Both of these at Sussex University UK.
 
Award(s)

I am lucky enough to have won a few awards for Zany Crazy code articles over the years

  • Microsoft C# MVP 2014
  • Codeproject MVP 2014
  • Microsoft C# MVP 2013
  • Codeproject MVP 2013
  • Microsoft C# MVP 2012
  • Codeproject MVP 2012
  • Microsoft C# MVP 2011
  • Codeproject MVP 2011
  • Microsoft C# MVP 2010
  • Codeproject MVP 2010
  • Microsoft C# MVP 2009
  • Codeproject MVP 2009
  • Microsoft C# MVP 2008
  • Codeproject MVP 2008
  • And numerous codeproject awards which you can see over at my blog

| Advertise | Privacy | Mobile
Web02 | 2.8.141022.1 | Last Updated 30 Jan 2007
Article Copyright 2006 by Sacha Barber
Everything else Copyright © CodeProject, 1999-2014
Terms of Service
Layout: fixed | fluid