Click here to Skip to main content
Click here to Skip to main content
Articles » Languages » C# » General » Downloads
 
Add your own
alternative version

Neural Network OCR

11 Aug 2005 — GPL3
Some ideas about optical character recognition using neural networks.
neuroocr_demo.zip
AForge.Imaging.dll
AForge.Math.dll
AForge.NeuralNet.dll
NeuroOCR.exe
SourceGrid2.dll
SourceLibrary.dll
neuroocr_src.zip
AForge
Math
Math.csproj.user
NeuralNet
Learning
NeuralNet.csproj.user
NeuroOCR
App.ico
NeuroOCR.csproj.user
References
AForge.Imaging.dll
SourceGrid2.dll
SourceLibrary.dll
// AForge Neural Net Library
//
// Copyright © Andrew Kirillov, 2005
// andrew.kirillov@gmail.com
//

namespace AForge.NeuralNet.Learning
{
	using System;

	/// <summary>
	/// Back propagation learning — supervised gradient-descent training for a
	/// multi-layer network, with configurable learning rate and momentum.
	/// Assumes all neurons in the network share the same activation function.
	/// </summary>
	public class BackPropagationLearning : ISupervisedLearning
	{
		private Network		net;	// network being trained
		private float		learningRate = 0.1f;	// gradient step size
		private float		momentum = 0.0f;		// fraction of previous update blended in
		private float		learningLimit = 0.1f;	// epoch error below which we report convergence

		private bool		converged = false;

		// per-layer, per-neuron error terms (output error * activation derivative)
		private float[][]	errors;
		// per-layer, per-neuron, per-synapse pending weight updates
		private float[][][]	deltas;
		// per-layer, per-neuron pending threshold updates
		private float[][]	thresholdDeltas;

		/// <summary>Learning rate — scales every weight/threshold update.</summary>
		public float LearningRate
		{
			get { return learningRate; }
			set { learningRate = value; }
		}
		/// <summary>Momentum — fraction of the previous update kept in the new one.</summary>
		public float Momentum
		{
			get { return momentum; }
			set { momentum = value; }
		}
		/// <summary>Epoch error sum below which <see cref="LearnEpoch"/> flags convergence.</summary>
		public float LearningLimit
		{
			get { return learningLimit; }
			set { learningLimit = value; }
		}
		/// <summary>True once an epoch's summed error fell below <see cref="LearningLimit"/>.</summary>
		public bool IsConverged
		{
			get { return converged; }
		}

		/// <summary>
		/// Create a learner for the given network, allocating error and delta
		/// buffers shaped to match the network's topology.
		/// </summary>
		/// <param name="net">Network to train.</param>
		public BackPropagationLearning(Network net)
		{
			this.net = net;

			// create error and deltas arrays, one slot per layer
			errors = new float[net.LayersCount][];
			deltas = new float[net.LayersCount][][];
			thresholdDeltas = new float[net.LayersCount][];
			
			for (int i = 0; i < net.LayersCount; i++)
			{
				Layer layer = net[i];

				errors[i] = new float[layer.NeuronsCount];
				deltas[i] = new float[layer.NeuronsCount][];
				thresholdDeltas[i] = new float[layer.NeuronsCount];

				// one delta per synapse (input) of each neuron
				for (int j = 0; j < layer.NeuronsCount; j++)
				{
					deltas[i][j] = new float[layer.InputsCount];
				}
			}
		}

		/// <summary>
		/// Perform one learning epoch over all training patterns and return the
		/// summed squared error. Also updates <see cref="IsConverged"/>.
		/// </summary>
		/// <param name="input">Array of input patterns.</param>
		/// <param name="output">Array of desired output patterns, parallel to <paramref name="input"/>.</param>
		public float LearnEpoch(float[][] input, float[][] output)
		{
			int		i, n = input.Length;
			float	error = 0.0f;

			// accumulate error over all training patterns
			for (i = 0; i < n; i++)
			{
				error += Learn(input[i], output[i]);
			}
			// determine if we converged
			converged = (error < learningLimit);

			// return summed error
			return error;
		}


		/// <summary>
		/// Perform one learning iteration on a single pattern and return the
		/// network's squared output error for that pattern.
		/// </summary>
		/// <param name="input">Input pattern.</param>
		/// <param name="output">Desired output pattern.</param>
		public float Learn(float[] input, float[] output)
		{
			// compute the network; needed only for its side effect of updating
			// each neuron's Output, which CalculateError reads below, so the
			// returned vector is deliberately discarded (was an unused local)
			net.Compute(input);

			// calculate network error
			float error = CalculateError(output);

			// calculate weights updates
			CalculateUpdates(input);

			// update the network
			UpdateNetwork();

			// return error level
			return error;
		}

		// Calculate per-neuron error terms for every layer (backward pass)
		// and return the squared error of the output layer.
		private float CalculateError(float[] desiredOutput)
		{
			Layer		layer, layerNext;
			float[]		err, errNext;
			float		error = 0, e;
			float		output, sum;
			int			i, j, k, n, m, layersCount = net.LayersCount;

			// assume, that all neurons of the network have
			// the same activation function
			IActivationFunction	function = net[0][0].ActivationFunction;

			// calculate error for the last (output) layer
			layer = net[layersCount - 1];
			err = errors[layersCount - 1];

			for (i = 0, n = layer.NeuronsCount; i < n; i++)
			{
				output = layer[i].Output;
				// error of the neuron
				e = desiredOutput[i] - output;
				// error multiplied with first derivative
				err[i] = e * function.OutputPrime2(output);
				// square the error and sum it
				error += (e * e);
			}

			// propagate error back through the hidden layers
			for (j = layersCount - 2; j >= 0; j--)
			{
				layer		= net[j];
				layerNext	= net[j + 1];
				err			= errors[j];
				errNext		= errors[j + 1];

				// for all neurons of the layer
				for (i = 0, n = layer.NeuronsCount; i < n; i++)
				{
					sum = 0.0f;
					// weight this neuron's contribution by each next-layer error
					for (k = 0, m = layerNext.NeuronsCount; k < m; k++)
					{
						sum += errNext[k] * layerNext[k][i];
					}
					err[i] = sum * function.OutputPrime2(layer[i].Output);
				}
			}

			// return squared error of the last layer
			return error;
		}

		// Calculate synapse (neuron weight) and threshold updates for all
		// layers from the error terms, blending in momentum.
		private void CalculateUpdates(float[] input)
		{
			Neuron		neuron;
			Layer		layer, layerPrev;
			float[][]	lDeltas;
			float[]		err, del, tdel;
			float		e;
			int			i, j, k, n, m, l;

			// 1 - for the first layer, whose inputs are the pattern itself
			layer	= net[0];
			lDeltas	= deltas[0];
			err		= errors[0];
			tdel	= thresholdDeltas[0];

			// for each neuron of the layer
			for (i = 0, n = layer.NeuronsCount; i < n; i++)
			{
				neuron	= layer[i];
				del		= lDeltas[i];
				e		= err[i];

				// for each synapse of the neuron
				for (j = 0, m = neuron.InputsCount; j < m; j++)
				{
					// calculate weight update
					del[j] = learningRate * (
							momentum * del[j] +
							(1.0f - momentum) * e * input[j]
						);
				}

				// calculate threshold update
				tdel[i] = learningRate * (
					momentum * tdel[i] +
					(1.0f - momentum) * e
					);
			}

			// 2 - for all other layers, whose inputs are the previous layer's outputs
			for (k = 1, l = net.LayersCount; k < l; k++)
			{
				layerPrev = net[k - 1];
				layer	= net[k];
				lDeltas	= deltas[k];
				err		= errors[k];
				tdel	= thresholdDeltas[k];

				// for each neuron of the layer
				for (i = 0, n = layer.NeuronsCount; i < n; i++)
				{
					neuron	= layer[i];
					del		= lDeltas[i];
					e		= err[i];

					// for each synapse of the neuron
					for (j = 0, m = neuron.InputsCount; j < m; j++)
					{
						// calculate weight update
						del[j] = learningRate * (
							momentum * del[j] +
							(1.0f - momentum) * e * layerPrev[j].Output
							);
					}

					// calculate threshold update
					tdel[i] = learningRate * (
						momentum * tdel[i] +
						(1.0f - momentum) * e
						);
				}
			}
		}

		// Apply the previously calculated deltas to every weight and
		// threshold of the network.
		private void UpdateNetwork()
		{
			Neuron		neuron;
			Layer		layer;
			float[][]	lDeltas;
			float[]		del, tdel;
			int			i, j, k, n, m, s;

			// for each layer of the network
			for (i = 0, n = net.LayersCount; i < n; i++)
			{
				layer = net[i];
				lDeltas = deltas[i];
				tdel = thresholdDeltas[i];

				// for each neuron of the layer
				for (j = 0, m = layer.NeuronsCount; j < m; j++)
				{
					neuron = layer[j];
					del = lDeltas[j];

					// for each weight of the neuron
					for (k = 0, s = neuron.InputsCount; k < s; k++)
					{
						// update weight
						neuron[k] += del[k];
					}
					// update threshold (note the sign convention: subtracted)
					neuron.Threshold -= tdel[j];
				}
			}
		}
	}
}

By viewing downloads associated with this article you agree to the Terms of Service and the article's licence.

If a file you wish to view isn't highlighted, and is a text file (not binary), please let us know and we'll add colourisation support for it.

License

This article, along with any associated source code and files, is licensed under The GNU General Public License (GPLv3)

Share

About the Author

Andrew Kirillov
Software Developer (Senior) Cisco Systems
United Kingdom United Kingdom
Started software development at about 15 years old and it seems like now it lasts most part of my life. Fortunately did not spend too much time with Z80 and BK0010 and switched to 8086 and further. Similarly with programming languages – luckily managed to get away from BASIC and Pascal to things like Assembler, C, C++ and then C#. Apart from daily programming for food, I do it also for hobby, where I mostly enjoy areas like Computer Vision, Robotics and AI. This led to some open source stuff like AForge.NET.
 
Going out of computers I am just a man loving his family, enjoying traveling, a bit of books, a bit of movies and a mixture of everything else. Always wanted to learn playing guitar, but it seems like 6 strings are much harder than few dozens of keyboard’s keys. Will keep progressing ...

| Advertise | Privacy | Terms of Use | Mobile
Web01 | 2.8.150327.1 | Last Updated 11 Aug 2005
Article Copyright 2005 by Andrew Kirillov
Everything else Copyright © CodeProject, 1999-2015
Layout: fixed | fluid