Click here to Skip to main content
Click here to Skip to main content
Add your own
alternative version

CNeuralNetwork: Make Your Neural Network Learn Faster

, 12 Aug 2009 CPOL
An article on making a neural network learn faster
Neural_Network_VS2008.zip
Neural Network_VS
Neural Network
Neural Network-Demo.exe
Neural Network.vcproj.RIS-808E3E7FF65.exeskeleton.user
SPECT.train
// auralius manurung
// gyeongsang national university
// jinju, south korea
// june 2009
// based on daniel admassu's work -> http://www.codeproject.com/KB/recipes/UnicodeOCR.aspx

#include <string>
#include <math.h>
#include <stdlib.h>
#include <stdarg.h>
#include <iostream>
#include <fstream>

using namespace std;

#define MAX_NEURON_PER_LAYER	30

// A simple fully-connected feed-forward neural network trained with
// backpropagation (with momentum and an optional adaptive learning rate).
// Layer sizes are set at run time via ann_create_network(); all internal
// storage is allocated dynamically and released by ann_clear().
class CNeuralNetwork
{
public:
	enum{
		HARD_RANDOM,	// weight init: uniform random within a caller-supplied range
		RANDOM,			// weight init: uniform random (default range)
		NGUYEN,			// weight init: Nguyen-Widrow method (default)
		INPUT_FIRST,	// data-file layout: inputs come before desired outputs on each line
		OUTPUT_FIRST	// data-file layout: desired outputs come before inputs on each line
	};
	CNeuralNetwork();
	~CNeuralNetwork();

  /**
	* Create the desired neural network.
	* Important: pay attention to the last parameter (...)
	*
	* @param input_num = number of input neurons
	* @param output_num = number of output neurons
	* @param hidden_layer_num = number of hidden layers
	* @param ... = number of neurons on each hidden layer
	*/
	void ann_create_network(unsigned int input_num, unsigned int output_num, unsigned int hidden_layer_num, ...);

  /**
	* Set learning rate value.
	*
	* @param learning_rate = learning rate
	*/
	void ann_set_learning_rate(float learning_rate = 0);

   /**
	* Set momentum value.
	* Momentum value should be between 0 to 1.
	*
	* @param momentum = momentum value
	*/
	void ann_set_momentum(float momentum = 0);

  /**
	* Set learning rate changing factor for adaptive learning.
	* It should be between 0 to 1.
	*
	* @param lr_factor = how rapidly the learning rate should change
	*/
	void ann_set_lr_changing_factor(float lr_factor = 0);

  /**
	* Set slope value for logistic sigmoid activation function.
	*
	* @param slope_value = slope value of the sigmoid function
	*/
	void ann_set_slope_value(float slope_value = 1);

  /**
	* Set desired weight initialization method.
	* Option: HARD_RANDOM, RANDOM, NGUYEN.
	* For HARD_RANDOM only, you must specify the range.
	*
	* @param method = desired method
	* @param range = range value, only for HARD_RANDOM
	*/
	void ann_set_weight_init_method(int method = NGUYEN , float range = 0);

  /**
	* Get last average error in one epoch after a training complete.
	*
	* @return  average error
	*/
    float ann_get_average_error();

  /**
	* Get number of epochs needed to complete training.
	*
	* @return  number of epochs
	*/
    int ann_get_epoch_num();

  /**
	* Train the neural network with a train set from a text file and log the result to result.log.
	* The train-set file should contain input and desired output.
	* Training stops at max_epoch or once the average error drops below max_error.
	* The epoch count reached is available afterwards via ann_get_epoch_num().
	*
	* @param file_name = file name for the train-set file
	* @param max_epoch = maximum number of training epochs
	* @param max_error = target average error at which training stops
	* @param parsing_direction = INPUT_FIRST or OUTPUT_FIRST, column order in the file
	*/
	void ann_train_network_from_file(char *file_name, int max_epoch, float max_error, int parsing_direction);

  /**
	* Test the TRAINED neural network with a test set from a text file and log the result to another file.
	* The test-set file should contain input and desired output.
	*
	* @param file_name = file name for the test-set file
	* @param log_file = the result will be logged here
	* @param parsing_direction = INPUT_FIRST or OUTPUT_FIRST, column order in the file
	*/
	void ann_test_network_from_file(char *file_name, char *log_file, int parsing_direction);

  /**
	* Set input per neuron in the input layer.
	* If your neural network has two inputs, the channels will be 0 and 1.
	*
	* @param input_channel = input channel
	* @param input = input value, the range: -1 to 1 (bipolar)
	*/
	void ann_set_input_per_channel(unsigned int input_channel, float input);

  /**
	* Simulate the neural network based on the current input.
	* After performing simulation, you can see the output by calling ann_get_output.
	*/
	void ann_simulate();

  /**
	* Get the output after performing simulation.
	* If your neural network has two outputs, the channels will be 0 and 1.
	*
	* @param channel = output channel
	* @return output value of that channel
	*/
	float ann_get_output(unsigned int channel);

  /**
	* Avoid memory leakage.
	* Delete all previously dynamically created variables.
	*/
	void ann_clear();

private:
	float rand_float_range(float a, float b);				// uniform random float in [a, b]
	float sigmoid(float f_net);								// logistic sigmoid activation
	float sigmoid_derivative(float result);					// derivative, expressed in terms of the sigmoid output
	void initialize_weights();								// apply the method chosen via ann_set_weight_init_method
	void calculate_outputs();								// forward pass
	void calculate_weights();								// weight update (backward pass)
	void calculate_errors();								// error/delta computation per neuron
	float get_mse_error();									// mean squared error of the current pattern
	void parse_data(string data_seq, int parsing_direction = INPUT_FIRST);	// split one file line into inputs/outputs
	float get_norm_of_weight(int layer_num, int neuron_num);
    void generate_report();

	int	m_layer_num;	            // total number of layers (input + hidden + output)
	int	*m_neuron_num;	            // this holds information of number of neurons on each layer

	float			m_learning_rate;
	float           m_lr_factor;                // learning rate changing factor, for adaptive learning

	float			m_momentum;

	float           m_slope;					// sigmoid slope, see ann_set_slope_value

	float			m_init_val;					// weight init value
	int				m_method;					// method for weight initialization

	float			m_average_error;			// average error in 1 epoch

	float			*m_current_input;			// one value per input channel
	float			*m_desired_output;			// one value per output channel

	float			***m_weight;				// [layer][neuron][input] connection weights

	float			**m_node_output;			// [layer][neuron] activations of the current pass
	float			**m_node_output_prev;		// previous pass, kept for the momentum term

	float			**m_error;					// [layer][neuron] error terms of the current pass
	float			**m_error_prev;				// previous pass, kept for the momentum term

    int             m_epoch;					// epochs consumed by the last training run

};

By viewing downloads associated with this article you agree to the Terms of Service and the article's licence.

If a file you wish to view isn't highlighted, and is a text file (not binary), please let us know and we'll add colourisation support for it.

License

This article, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)

Share

About the Author

auralius manurung
Gyeongsang National University, South Korea
Indonesia Indonesia
from Indonesia with love... :)

| Advertise | Privacy | Terms of Use | Mobile
Web01 | 2.8.141223.1 | Last Updated 12 Aug 2009
Article Copyright 2009 by auralius manurung
Everything else Copyright © CodeProject, 1999-2014
Layout: fixed | fluid