Click here to Skip to main content
15,885,767 members
Articles / Artificial Intelligence / Neural Networks

Multiple convolution neural networks approach for online handwriting recognition

Rate me:
Please Sign up or sign in to vote.
4.95/5 (37 votes)
9 Apr 2013 · CPOL · 8 min read · 76K views · 25.1K downloads · 74 bookmarks
The research focuses on the presentation of a word recognition technique for an online handwriting recognition system that uses multiple component neural networks (MCNN) as the exchangeable parts of the classifier.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using ANN.Perceptron.ArchiveSerialization;
using ANN.Perceptron.Common;
using ANN.Perceptron.Connections;
using ANN.Perceptron.Neurons;
using ANN.Perceptron.Weights;
using System.Drawing;
using System.Threading;
using System.Threading.Tasks;
namespace ANN.Perceptron.Layers
{
    /// <summary>
    /// A convolutional layer. Each of the <c>nFeatureMaps</c> feature maps is produced by
    /// sliding a square (kernelsize x kernelsize) kernel, with a stride of ONE pixel, over
    /// every feature map of the previous layer ("valid" convolution, no padding), plus a
    /// single trainable bias per feature map. Weights are shared by all neurons of a map.
    /// </summary>
    public class ConvolutionLayer : CommonLayer
    {
        // Side length of the square convolution kernel.
        protected int kernelsize;

        /// <summary>
        /// Creates a convolution layer connected to <paramref name="pPrev"/>.
        /// </summary>
        /// <param name="sLabel">Human-readable label used when naming neurons and weights.</param>
        /// <param name="pPrev">The previous layer; must not be null (it is dereferenced immediately).</param>
        /// <param name="nMaps">Number of feature maps in this layer.</param>
        /// <param name="sizeofkernel">Side length of the square convolution kernel.</param>
        public ConvolutionLayer(string sLabel, CommonLayer pPrev /* =NULL */, int nMaps, int sizeofkernel)
        {
            label = sLabel;
            prevLayer = pPrev;
            weights = null;
            // "Valid" convolution with stride 1: each dimension shrinks by (kernel - 1).
            // The original code hard-coded 4 here, i.e. silently assumed a 5x5 kernel;
            // using sizeofkernel keeps the geometry consistent with the parameter.
            Size fmSize = Size.Empty;
            fmSize.Width = prevLayer.FeatureMapSize.Width - (sizeofkernel - 1);
            fmSize.Height = prevLayer.FeatureMapSize.Height - (sizeofkernel - 1);
            neurons = new Neuron[fmSize.Width * fmSize.Height * nMaps];
            featureMapSize = fmSize;
            kernelsize = sizeofkernel;
            nFeatureMaps = nMaps;
            weightCount = 0;
            floatingPointWarning = false;
            type = LayerTypes.Convolution;
            ParallelOption = new ParallelOptions();
            ParallelOption.TaskScheduler = null;
            _maxDegreeOfParallelism = Environment.ProcessorCount;
            ParallelOption.MaxDegreeOfParallelism = _maxDegreeOfParallelism;
        }

        /// <summary>Resets the warning flag and (re)builds all neurons, weights and connections.</summary>
        public override void Initialize()
        {
            floatingPointWarning = false;
            CreateLayer();
        }

        /// <summary>
        /// Allocates the layer's neurons and weights and wires every neuron to a
        /// kernelsize x kernelsize window (per previous-layer feature map) plus a bias.
        /// </summary>
        protected override void CreateLayer()
        {
            // Drop any previously created neurons/weights so the layer can be rebuilt.
            if (neuronCount > 0 || neurons != null)
            {
                neurons = null;
                neuronCount = 0;
            }
            if (weightCount > 0 || weights != null)
            {
                weights = null;
                weightCount = 0;
            }

            if (prevLayer == null)
            {
                return;
            }

            // Per feature map: (kernelsize^2 * prevFeatureMaps) kernel weights + 1 bias.
            // Totals: nFeatureMaps * W * H neurons, nFeatureMaps * weightsPerMap weights.
            int weightsPerMap = kernelsize * kernelsize * prevLayer.FeatureMapCount + 1;
            neuronCount = nFeatureMaps * featureMapSize.Width * featureMapSize.Height;
            weightCount = nFeatureMaps * weightsPerMap;

            neurons = new Neuron[neuronCount];
            for (int ii = 0; ii < neuronCount; ii++)
            {
                neurons[ii] = new Neuron(String.Format("Layer {0}, Neuron {1}", label, ii));
            }

            // Small uniform random initial weights in [-0.05, 0.05).
            // (The original constructed a second Random here although one already existed.)
            var rdm = new Random();
            weights = new Weight[weightCount];
            for (int jj = 0; jj < weightCount; jj++)
            {
                double initWeight = 0.05 * (2.0 * rdm.NextDouble() - 1.0);
                weights[jj] = new Weight(String.Format("Layer {0}, Weight {1}", label, jj), initWeight);
            }

            // Interconnections with the previous layer: each neuron of feature map fm at
            // position (w, h) reads a kernelsize x kernelsize window anchored at (w, h) in
            // EVERY feature map of the previous layer (stride 1 — the window moves by one
            // source pixel per destination pixel). All neurons of the same feature map
            // share the same weightsPerMap weights (weight sharing).
            int[] kernelTemplate = CreateKernelTemplate(kernelsize, prevLayer.FeatureMapSize.Width);
            for (int fm = 0; fm < nFeatureMaps; fm++)
            {
                for (int h = 0; h < featureMapSize.Height; h++)
                {
                    for (int w = 0; w < featureMapSize.Width; w++)
                    {
                        // First weight index belonging to this feature map.
                        int iNumWeight = fm * weightsPerMap;
                        Neuron n = neurons[w + h * featureMapSize.Width + fm * featureMapSize.Width * featureMapSize.Height];
                        n.ConnectionCount = weightsPerMap;
                        n.Connections = new Connection[weightsPerMap];
                        // BUG FIX: the bias must consume its own weight index. The original
                        // code passed iNumWeight without incrementing it, so the first kernel
                        // connection reused the bias weight and the last weight allocated for
                        // each feature map was never referenced.
                        n.AddConnection((uint)NNDefinations.ULONG_MAX, (uint)iNumWeight++, 0); // bias weight
                        // Kernel connections: k walks the kernel window, l walks the
                        // previous layer's feature maps.
                        for (int k = 0; k < kernelsize * kernelsize; k++)
                        {
                            for (int l = 0; l < prevLayer.FeatureMapCount; l++)
                            {
                                // Flat index of the source neuron inside the previous layer.
                                int iNeurons = (w + (h * prevLayer.FeatureMapSize.Width) + kernelTemplate[k] +
                                    l * prevLayer.FeatureMapSize.Width * prevLayer.FeatureMapSize.Height);
                                int connID = 1 + l + k * prevLayer.FeatureMapCount;
                                n.AddConnection((uint)iNeurons, (uint)iNumWeight++, connID);
                            }
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Builds the table of source-neuron offsets for a square kernel: entry
        /// (i + j * size) holds the flat offset of kernel cell (i, j) inside a
        /// previous-layer feature map of width <paramref name="fmWidth"/>.
        /// </summary>
        /// <param name="size">Kernel side length.</param>
        /// <param name="fmWidth">Width (row stride) of a previous-layer feature map.</param>
        /// <returns>An array of size*size neuron-index offsets.</returns>
        protected int[] CreateKernelTemplate(int size, int fmWidth)
        {
            int[] kernelTemplate = new int[size * size];
            // Each row of the template is independent, so rows are filled in parallel.
            Parallel.For(0, size, ParallelOption, i =>
            {
                for (int j = 0; j < size; j++)
                {
                    kernelTemplate[i + j * size] = i + j * fmWidth;
                }
            });
            return kernelTemplate;
        }
    }
}

By viewing downloads associated with this article you agree to the Terms of Service and the article's licence.

If a file you wish to view isn't highlighted, and is a text file (not binary), please let us know and we'll add colourisation support for it.

License

This article, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)


Written By
Vietnam Maritime University
Vietnam Vietnam
This member has not yet provided a Biography. Assume it's interesting and varied, and probably something to do with programming.

Comments and Discussions