
Artificial Neural Networks made easy with the FANN library

28 Aug 2013
Neural networks are typically associated with specialised applications, developed only by select groups of experts. This misconception has had a highly negative effect on their popularity. Hopefully, the FANN library will help fill this gap.
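To give an idea of how little code a working network takes, here is a minimal sketch of typical FANN 1.2 usage. The topology and training parameters are illustrative only, and the xor.data training file is assumed to come from the examples directory of the library download below.

#include "fann.h"

int main()
{
	/* fully connected net (connection rate 1.0), learning rate 0.7,
	   3 layers: 2 inputs, 3 hidden neurons, 1 output */
	struct fann *ann = fann_create(1.0f, 0.7f, 3, 2, 3, 1);

	/* train on the XOR data set until the mean square error drops
	   below 0.001, for at most 500000 epochs, reporting every 1000 */
	fann_train_on_file(ann, "xor.data", 500000, 1000, 0.001f);

	/* save the trained network and clean up */
	fann_save(ann, "xor_float.net");
	fann_destroy(ann);
	return 0;
}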
Downloads: fann-1_2_0.zip, fann_win32_dll-1_2_0.zip, vs_net2003.zip
/*
  Fast Artificial Neural Network Library (fann)
  Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
  
  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.
  
  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.
  
  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>

#include "config.h"
#include "fann.h"
#include "fann_errno.h"

/* Prints all of the parameters and options of the ANN */
FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
{
	struct fann_layer *layer_it;
	
	printf("Input layer                : %2d neurons, 1 bias\n", ann->num_input);
	for(layer_it = ann->first_layer+1; layer_it != ann->last_layer-1; layer_it++){
		printf("  Hidden layer             : %2d neurons, 1 bias\n",
			layer_it->last_neuron - layer_it->first_neuron - 1);
	}
	printf("Output layer               : %2d neurons\n", ann->num_output);
	printf("Total neurons and biases   : %2d\n", fann_get_total_neurons(ann));
	printf("Total connections          : %2d\n", ann->total_connections);
	printf("Connection rate            : %5.2f\n", ann->connection_rate);
	printf("Shortcut connections        : %2d\n", ann->shortcut_connections);
	printf("Training algorithm         :  %s\n", FANN_TRAIN_NAMES[ann->training_algorithm]);	
	printf("Learning rate              : %5.2f\n", ann->learning_rate);
	printf("Activation function hidden :  %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_hidden]);
	printf("Activation function output :  %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_output]);
#ifndef FIXEDFANN
	printf("Activation steepness hidden: %5.2f\n", ann->activation_steepness_hidden);
	printf("Activation steepness output: %5.2f\n", ann->activation_steepness_output);
#else
	printf("Activation steepness hidden: %d\n", ann->activation_steepness_hidden);
	printf("Activation steepness output: %d\n", ann->activation_steepness_output);
	printf("Decimal point              : %2d\n", ann->decimal_point);
	printf("Multiplier                 : %2d\n", ann->multiplier);
#endif
	printf("Training error function    :  %s\n", FANN_ERRORFUNC_NAMES[ann->train_error_function]);
	printf("Quickprop decay            : %9.6f\n", ann->quickprop_decay);
	printf("Quickprop mu               : %5.2f\n", ann->quickprop_mu);
	printf("RPROP increase factor      : %5.2f\n", ann->rprop_increase_factor);
	printf("RPROP decrease factor      : %5.2f\n", ann->rprop_decrease_factor);
	printf("RPROP delta min            : %5.2f\n", ann->rprop_delta_min);
	printf("RPROP delta max            : %5.2f\n", ann->rprop_delta_max);
}

FANN_EXTERNAL unsigned int FANN_API fann_get_training_algorithm(struct fann *ann)
{
	return ann->training_algorithm;
}

FANN_EXTERNAL void FANN_API fann_set_training_algorithm(struct fann *ann, unsigned int training_algorithm)
{
	ann->training_algorithm = training_algorithm;
}

FANN_EXTERNAL void FANN_API fann_set_learning_rate(struct fann *ann, float learning_rate)
{
	ann->learning_rate = learning_rate;
}

FANN_EXTERNAL void FANN_API fann_set_activation_function_hidden(struct fann *ann, unsigned int activation_function)
{
	ann->activation_function_hidden = activation_function;
	fann_update_stepwise_hidden(ann);
}

FANN_EXTERNAL void FANN_API fann_set_activation_function_output(struct fann *ann, unsigned int activation_function)
{
	ann->activation_function_output = activation_function;
	fann_update_stepwise_output(ann);
}

FANN_EXTERNAL void FANN_API fann_set_activation_steepness_hidden(struct fann *ann, fann_type steepness)
{
	ann->activation_steepness_hidden = steepness;
	fann_update_stepwise_hidden(ann);
}

FANN_EXTERNAL void FANN_API fann_set_activation_steepness_output(struct fann *ann, fann_type steepness)
{
	ann->activation_steepness_output = steepness;
	fann_update_stepwise_output(ann);
}

FANN_EXTERNAL void FANN_API fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness)
{
	fann_set_activation_steepness_hidden(ann, steepness);
}

FANN_EXTERNAL void FANN_API fann_set_activation_output_steepness(struct fann *ann, fann_type steepness)
{
	fann_set_activation_steepness_output(ann, steepness);
}

FANN_EXTERNAL float FANN_API fann_get_learning_rate(struct fann *ann)
{
	return ann->learning_rate;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_num_input(struct fann *ann)
{
	return ann->num_input;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_num_output(struct fann *ann)
{
	return ann->num_output;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_activation_function_hidden(struct fann *ann)
{
	return ann->activation_function_hidden;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_activation_function_output(struct fann *ann)
{
	return ann->activation_function_output;
}

FANN_EXTERNAL fann_type FANN_API fann_get_activation_hidden_steepness(struct fann *ann)
{
	return ann->activation_steepness_hidden;
}

FANN_EXTERNAL fann_type FANN_API fann_get_activation_output_steepness(struct fann *ann)
{
	return ann->activation_steepness_output;
}

FANN_EXTERNAL fann_type FANN_API fann_get_activation_steepness_hidden(struct fann *ann)
{
	return ann->activation_steepness_hidden;
}

FANN_EXTERNAL fann_type FANN_API fann_get_activation_steepness_output(struct fann *ann)
{
	return ann->activation_steepness_output;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_total_neurons(struct fann *ann)
{
	/* -1, because there is always an unused bias neuron in the last layer */
	return ann->total_neurons - 1;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_total_connections(struct fann *ann)
{
	return ann->total_connections;
}

fann_type * fann_get_weights(struct fann *ann)
{
	return (ann->first_layer+1)->first_neuron->weights;
}

struct fann_neuron** fann_get_connections(struct fann *ann)
{
	return (ann->first_layer+1)->first_neuron->connected_neurons;
}


/* Sets the error function used during training (default ).
   Making the error used for calculating the slopes larger
   when the difference is larger usually makes training faster.
 */
FANN_EXTERNAL void FANN_API fann_set_train_error_function(struct fann *ann, unsigned int train_error_function)
{
	ann->train_error_function = train_error_function;
}

/* Decay is used to keep the weights from growing too large (default -0.0001). */
FANN_EXTERNAL void FANN_API fann_set_quickprop_decay(struct fann *ann, float quickprop_decay)
{
	ann->quickprop_decay = quickprop_decay;
}
	
/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
FANN_EXTERNAL void FANN_API fann_set_quickprop_mu(struct fann *ann, float quickprop_mu)
{
	ann->quickprop_mu = quickprop_mu;
}

/* Tells how much the stepsize should increase during learning (default 1.2). */
FANN_EXTERNAL void FANN_API fann_set_rprop_increase_factor(struct fann *ann, float rprop_increase_factor)
{
	ann->rprop_increase_factor = rprop_increase_factor;
}

/* Tells how much the stepsize should decrease during learning (default 0.5). */
FANN_EXTERNAL void FANN_API fann_set_rprop_decrease_factor(struct fann *ann, float rprop_decrease_factor)
{
	ann->rprop_decrease_factor = rprop_decrease_factor;
}

/* The minimum stepsize (default 0.0). */
FANN_EXTERNAL void FANN_API fann_set_rprop_delta_min(struct fann *ann, float rprop_delta_min)
{
	ann->rprop_delta_min = rprop_delta_min;
}

/* The maximum stepsize (default 50.0). */
FANN_EXTERNAL void FANN_API fann_set_rprop_delta_max(struct fann *ann, float rprop_delta_max)
{
	ann->rprop_delta_max = rprop_delta_max;
}

/* Returns the error function used during training (default ).
   Making the error used for calculating the slopes larger
   when the difference is larger usually makes training faster.
 */
FANN_EXTERNAL unsigned int FANN_API fann_get_train_error_function(struct fann *ann)
{
	return ann->train_error_function;
}

/* Decay is used to keep the weights from growing too large (default -0.0001). */
FANN_EXTERNAL float FANN_API fann_get_quickprop_decay(struct fann *ann)
{
	return ann->quickprop_decay;
}
	
/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
FANN_EXTERNAL float FANN_API fann_get_quickprop_mu(struct fann *ann)
{
	return ann->quickprop_mu;
}

/* Tells how much the stepsize should increase during learning (default 1.2). */
FANN_EXTERNAL float FANN_API fann_get_rprop_increase_factor(struct fann *ann)
{
	return ann->rprop_increase_factor;
}

/* Tells how much the stepsize should decrease during learning (default 0.5). */
FANN_EXTERNAL float FANN_API fann_get_rprop_decrease_factor(struct fann *ann)
{
	return ann->rprop_decrease_factor;
}

/* The minimum stepsize (default 0.0). */
FANN_EXTERNAL float FANN_API fann_get_rprop_delta_min(struct fann *ann)
{
	return ann->rprop_delta_min;
}

/* The maximum stepsize (default 50.0). */
FANN_EXTERNAL float FANN_API fann_get_rprop_delta_max(struct fann *ann)
{
	return ann->rprop_delta_max;
}

#ifdef FIXEDFANN
/* Returns the position of the fixed point. */
FANN_EXTERNAL unsigned int FANN_API fann_get_decimal_point(struct fann *ann)
{
	return ann->decimal_point;
}

/* Returns the multiplier that fixed-point data is multiplied by. */
FANN_EXTERNAL unsigned int FANN_API fann_get_multiplier(struct fann *ann)
{
	return ann->multiplier;
}

#endif

/* INTERNAL FUNCTION
   Adjust the stepwise functions (if used)
*/
void fann_update_stepwise_hidden(struct fann *ann)
{
	unsigned int i = 0;
#ifndef FIXEDFANN
	/* For use in stepwise linear activation function.
	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
	*/
	switch(ann->activation_function_hidden){
		case FANN_SIGMOID:
		case FANN_SIGMOID_STEPWISE:
			ann->activation_results_hidden[0] = (fann_type)0.005;
			ann->activation_results_hidden[1] = (fann_type)0.05;
			ann->activation_results_hidden[2] = (fann_type)0.25;
			ann->activation_results_hidden[3] = (fann_type)0.75;
			ann->activation_results_hidden[4] = (fann_type)0.95;
			ann->activation_results_hidden[5] = (fann_type)0.995;	
			break;
		case FANN_SIGMOID_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
			ann->activation_results_hidden[0] = (fann_type)-0.99;
			ann->activation_results_hidden[1] = (fann_type)-0.9;
			ann->activation_results_hidden[2] = (fann_type)-0.5;
			ann->activation_results_hidden[3] = (fann_type)0.5;
			ann->activation_results_hidden[4] = (fann_type)0.9;
			ann->activation_results_hidden[5] = (fann_type)0.99;
			break;
		default:
			/* the activation functions which do not have a stepwise function
			   should not have it calculated */
			return;
	}
#else
	/* Calculate the parameters for the fixed-point stepwise
	   linear sigmoid function, using a rewritten sigmoid function.
	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
	*/
	switch(ann->activation_function_hidden){
		case FANN_SIGMOID:
		case FANN_SIGMOID_STEPWISE:
			ann->activation_results_hidden[0] = (fann_type)(ann->multiplier/200.0+0.5);
			ann->activation_results_hidden[1] = (fann_type)(ann->multiplier/20.0+0.5);
			ann->activation_results_hidden[2] = (fann_type)(ann->multiplier/4.0+0.5);
			ann->activation_results_hidden[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
			ann->activation_results_hidden[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
			ann->activation_results_hidden[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
			break;
		case FANN_SIGMOID_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
			ann->activation_results_hidden[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
			ann->activation_results_hidden[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
			ann->activation_results_hidden[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
			ann->activation_results_hidden[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
			ann->activation_results_hidden[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
			ann->activation_results_hidden[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
			break;
		default:
			/* the activation functions which do not have a stepwise function
			   should not have it calculated */
			return;
	}			
#endif

	for(i = 0; i < 6; i++){
#ifndef FIXEDFANN
		switch(ann->activation_function_hidden){
			case FANN_SIGMOID:
				break;
			case FANN_SIGMOID_STEPWISE:
				ann->activation_values_hidden[i] = (fann_type)((log(1.0/ann->activation_results_hidden[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_steepness_hidden);
				break;
			case FANN_SIGMOID_SYMMETRIC:
			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
				ann->activation_values_hidden[i] = (fann_type)((log((1.0-ann->activation_results_hidden[i]) / (ann->activation_results_hidden[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_steepness_hidden);
				break;
		}
#else
		switch(ann->activation_function_hidden){
			case FANN_SIGMOID:
			case FANN_SIGMOID_STEPWISE:
				ann->activation_values_hidden[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_results_hidden[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_hidden);
				break;
			case FANN_SIGMOID_SYMMETRIC:
			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
				ann->activation_values_hidden[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_results_hidden[i])/((float)ann->activation_results_hidden[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_hidden);
				break;
		}
#endif
	}
}

/* INTERNAL FUNCTION
   Adjust the stepwise functions (if used)
*/
void fann_update_stepwise_output(struct fann *ann)
{
	unsigned int i = 0;
#ifndef FIXEDFANN
	/* For use in stepwise linear activation function.
	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
	*/
	switch(ann->activation_function_output){
		case FANN_SIGMOID:
		case FANN_SIGMOID_STEPWISE:
			ann->activation_results_output[0] = (fann_type)0.005;
			ann->activation_results_output[1] = (fann_type)0.05;
			ann->activation_results_output[2] = (fann_type)0.25;
			ann->activation_results_output[3] = (fann_type)0.75;
			ann->activation_results_output[4] = (fann_type)0.95;
			ann->activation_results_output[5] = (fann_type)0.995;	
			break;
		case FANN_SIGMOID_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
			ann->activation_results_output[0] = (fann_type)-0.99;
			ann->activation_results_output[1] = (fann_type)-0.9;
			ann->activation_results_output[2] = (fann_type)-0.5;
			ann->activation_results_output[3] = (fann_type)0.5;
			ann->activation_results_output[4] = (fann_type)0.9;
			ann->activation_results_output[5] = (fann_type)0.99;
			break;
		default:
			/* the activation functions which do not have a stepwise function
			   should not have it calculated */
			return;
	}
#else
	/* Calculate the parameters for the fixed-point stepwise
	   linear sigmoid function, using a rewritten sigmoid function.
	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
	*/
	switch(ann->activation_function_output){
		case FANN_SIGMOID:
		case FANN_SIGMOID_STEPWISE:
			ann->activation_results_output[0] = (fann_type)(ann->multiplier/200.0+0.5);
			ann->activation_results_output[1] = (fann_type)(ann->multiplier/20.0+0.5);
			ann->activation_results_output[2] = (fann_type)(ann->multiplier/4.0+0.5);
			ann->activation_results_output[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
			ann->activation_results_output[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
			ann->activation_results_output[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
			break;
		case FANN_SIGMOID_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
			ann->activation_results_output[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
			ann->activation_results_output[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
			ann->activation_results_output[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
			ann->activation_results_output[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
			ann->activation_results_output[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
			ann->activation_results_output[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
			break;
		default:
			/* the actiavation functions which do not have a stepwise function
			   should not have it calculated */
			return;
	}			
#endif

	for(i = 0; i < 6; i++){
#ifndef FIXEDFANN
		switch(ann->activation_function_output){
			case FANN_SIGMOID:
				break;
			case FANN_SIGMOID_STEPWISE:
				ann->activation_values_output[i] = (fann_type)((log(1.0/ann->activation_results_output[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_steepness_output);
				break;
			case FANN_SIGMOID_SYMMETRIC:
			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
				ann->activation_values_output[i] = (fann_type)((log((1.0-ann->activation_results_output[i]) / (ann->activation_results_output[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_steepness_output);
				break;
		}
#else
		switch(ann->activation_function_output){
			case FANN_SIGMOID:
			case FANN_SIGMOID_STEPWISE:
				ann->activation_values_output[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_results_output[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_output);
				break;
			case FANN_SIGMOID_SYMMETRIC:
			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
				ann->activation_values_output[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_results_output[i])/((float)ann->activation_results_output[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_output);
				break;
		}
#endif
	}
}
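As a rough illustration of how the options defined above are typically combined, the setters and fann_print_parameters() can be used as below. This is a sketch only: the topology and parameter values are arbitrary, and the FANN_TRAIN_RPROP, FANN_ERRORFUNC_TANH and FANN_SIGMOID_SYMMETRIC constants are assumed to come from the FANN 1.2 headers.

#include "fann.h"

int main()
{
	/* arbitrary fully connected 2-4-1 network, learning rate 0.7 */
	struct fann *ann = fann_create(1.0f, 0.7f, 3, 2, 4, 1);

	/* pick a training algorithm and error function */
	fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
	fann_set_train_error_function(ann, FANN_ERRORFUNC_TANH);

	/* symmetric sigmoid activations; changing the steepness also
	   recalculates the stepwise approximation tables shown above */
	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_steepness_hidden(ann, (fann_type)0.5);
	fann_set_activation_steepness_output(ann, (fann_type)0.5);

	/* dump the resulting configuration */
	fann_print_parameters(ann);

	fann_destroy(ann);
	return 0;
}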


License

This article, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)


About the Author

Software Developer's Journal
Publisher
Poland
Software Developer's Journal (formerly Software 2.0) is a magazine for professional programmers and developers, publishing news from the software world and practical articles presenting ready-to-use programming solutions.

Article Copyright 2006 by Software Developer's Journal