
Multiple convolution neural networks approach for online handwriting recognition

9 Apr 2013
The research presents a word recognition technique for an online handwriting recognition system that uses multiple convolution neural networks (MCNN) as exchangeable parts of the classifier.
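Before diving into the code, a quick illustration of the MCNN idea may help: several component networks score the same input, and their per-class outputs are combined into a single decision. The sketch below is an illustration only, not the article's actual API; the INetwork interface, the Recognize method, and the score-averaging rule are all hypothetical.

using System.Collections.Generic;
using System.Linq;

// Hypothetical component-network interface; the article's real classes differ.
interface INetwork
{
    double[] Recognize(double[] input); // per-class scores for one input
}

static class McnnSketch
{
    // Combine the component networks by averaging their per-class scores
    // and returning the index of the best class (one plausible combination rule).
    static int Classify(IReadOnlyList<INetwork> components, double[] input)
    {
        double[][] scores = components.Select(n => n.Recognize(input)).ToArray();
        int classCount = scores[0].Length;
        int best = 0;
        double bestScore = double.NegativeInfinity;
        for (int c = 0; c < classCount; c++)
        {
            double avg = scores.Average(s => s[c]);
            if (avg > bestScore) { bestScore = avg; best = c; }
        }
        return best;
    }
}

The listing that follows implements one building block of each component network: the subsampling (pooling) layer, SamplingLayer.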
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using ANN.Perceptron.ArchiveSerialization;
using ANN.Perceptron.Common;
using ANN.Perceptron.Connections;
using ANN.Perceptron.Neurons;
using ANN.Perceptron.Weights;
using System.Drawing;
using System.Threading;
using System.Threading.Tasks;
namespace ANN.Perceptron.Layers
{
    public class SamplingLayer : CommonLayer
    {
        protected int kernelsize;
        public SamplingLayer(string sLabel, CommonLayer pPrev)
        {
            label = sLabel;
            prevLayer = pPrev;
            weights = null;
            //feature map size
            Size fmSize=Size.Empty;
            fmSize.Width =(int) Math.Floor((double)prevLayer.FeatureMapSize.Width / 2);
            fmSize.Height = (int)Math.Floor((double)prevLayer.FeatureMapSize.Height / 2);
            //feature map count
            int nMaps=prevLayer.FeatureMapCount;
            //kernel size
            int sizeofkernel = 2;
            //number of neurons
            neurons = new Neuron[fmSize.Width * fmSize.Height * nMaps];

            featureMapSize = fmSize;
            kernelsize = sizeofkernel;
            nFeatureMaps = nMaps;
            weightCount = 0;
            floatingPointWarning = false;
            type = LayerTypes.Sampling;
            ParallelOption = new ParallelOptions();
            ParallelOption.TaskScheduler = null;
            _maxDegreeOfParallelism = Environment.ProcessorCount;
            ParallelOption.MaxDegreeOfParallelism = _maxDegreeOfParallelism;
        }
        public override void Initialize()
        {
            floatingPointWarning = false;
            CreateLayer();
        }
        protected override void CreateLayer()
        {
            int iNumWeight;
            var rdm = new Random();
            if (neuronCount > 0 || neurons != null)
            { //clear neurons
                neurons = null;
                neuronCount = 0;
            }
            if (weightCount > 0 || weights != null)
            {
                //clear weights;
                weights = null;
                weightCount = 0;
            }

            // Subsampling layer:
            // This layer has the same number of feature maps (nFeatureMaps) as the
            // previous convolution layer. Each feature map is featureMapSize.Width x
            // featureMapSize.Height, and each unit in a feature map is fed by a
            // kernelsize x kernelsize (2x2) window of the previous layer's output.
            // So there are (nFeatureMaps * featureMapSize.Width * featureMapSize.Height)
            // neurons, and the whole layer shares a single trainable weight (weightCount = 1).
            if (prevLayer != null)
            {
                neuronCount = nFeatureMaps * featureMapSize.Width * featureMapSize.Height;
                weightCount = 1; //number of weights in this layer is 1
                neurons = new Neuron[NeuronCount];
                for (int ii = 0; ii < neuronCount; ii++)
                {
                    String lb = String.Format("Layer {0}, Neuron {1}", label, ii);
                    neurons[ii] = new Neuron(lb);
                }
                weights = new Weight[weightCount];
                for (int jj = 0; jj < weightCount; jj++)
                {
                    String lb = String.Format("Layer {0}, Weight {1}", label, jj);
                    double initWeight = 0.05 * (2.0 * rdm.NextDouble() - 1.0);
                    weights[jj] = new Weight(lb, initWeight);
                }

                // Interconnections with the previous layer (a convolution layer):
                // each neuron here is connected to a kernelsize x kernelsize (2x2) window
                // of its feature map. The window moves in steps of TWO pixels, so
                // neighbouring windows do not overlap.

                int[] kernelTemplate = CreateKernelTemplate(kernelsize, prevLayer.FeatureMapSize.Width);
                for (int fm = 0; fm < nFeatureMaps; fm++)
                {
                    for (int h = 0; h < featureMapSize.Height; h++)
                    {
                        for (int w = 0; w < featureMapSize.Width; w++)
                        {
                            iNumWeight = 0;
                            Neuron n = neurons[w + h * featureMapSize.Width + fm * featureMapSize.Width * featureMapSize.Height];
                            // each neuron in this layer has kernelsize * kernelsize connections (2x2), no bias
                            int connCount = kernelsize * kernelsize; // no bias term
                            n.ConnectionCount = connCount;
                            n.Connections = new Connection[connCount];
                            //add connections to neuron                         
                            for (int k = 0; k < kernelsize * kernelsize; k++)
                            {
                                int iNeurons = kernelsize * w + (kernelsize * h * prevLayer.FeatureMapSize.Width) + kernelTemplate[k];
                                int connID = k;
                                n.AddConnection((uint)iNeurons, (uint)iNumWeight, connID);                               
                            }
                        }
                    }
                }
            }
          
        }
        protected int[] CreateKernelTemplate(int size, int fmWidth)
        {
            int[] kernelTemplate = new int[size * size];
            // Generate the offsets of the window's cells relative to its top-left
            // source neuron; for size = 2 the template is { 0, 1, fmWidth, fmWidth + 1 }.
            Parallel.For(0, size, ParallelOption, i =>
            {
                for (int j = 0; j < size; j++)
                {
                    kernelTemplate[i + j * size] = i + j * fmWidth;
                }
            });
            return kernelTemplate;
        }
    }
}
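To make the connection arithmetic above concrete, here is a small self-contained sketch, independent of the ANN.Perceptron classes, that reproduces the same kernel-template and source-index computation for one feature map. The names and the 13-pixel width are assumptions chosen purely for illustration.

using System;

static class KernelTemplateDemo
{
    static void Main()
    {
        int kernelSize = 2;   // 2x2 subsampling window, as in SamplingLayer
        int prevWidth = 13;   // assumed width of the previous feature map

        // Same arithmetic as CreateKernelTemplate: offsets of the window's
        // cells relative to its top-left source neuron.
        int[] template = new int[kernelSize * kernelSize];
        for (int j = 0; j < kernelSize; j++)
            for (int i = 0; i < kernelSize; i++)
                template[i + j * kernelSize] = i + j * prevWidth;
        Console.WriteLine(string.Join(", ", template)); // 0, 1, 13, 14

        // Source indices feeding output neuron (w, h) = (3, 2): the window's
        // top-left corner moves in steps of kernelSize, i.e., stride 2.
        int w = 3, h = 2;
        foreach (int offset in template)
        {
            int src = kernelSize * w + kernelSize * h * prevWidth + offset;
            Console.Write(src + " "); // prints: 58 59 71 72
        }
    }
}

Because the window never overlaps its neighbours, each source neuron feeds exactly one output neuron, and the single shared weight keeps the layer's trainable parameter count at one.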


License

This article, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)


Written By
Vietnam Maritime University
Vietnam
