Click here to Skip to main content
15,883,741 members
Articles / Artificial Intelligence / Machine Learning

Comparing Neural Networks in Neuroph, Encog and JOONE

Rate me:
Please Sign up or sign in to vote.
4.25/5 (8 votes)
2 Jun 2010 · LGPL3 · 8 min read · 52.3K views · 886 downloads · 14 bookmarks
Highlights the differences in how you create an XOR network in Neuroph, Encog and JOONE
import java.util.Vector;

import org.encog.neural.data.NeuralData;
import org.encog.neural.data.NeuralDataPair;
import org.neuroph.core.NeuralNetwork;
import org.neuroph.core.learning.SupervisedTrainingElement;
import org.neuroph.core.learning.TrainingElement;
import org.neuroph.core.learning.TrainingSet;
import org.neuroph.nnet.MultiLayerPerceptron;
import org.neuroph.nnet.learning.DynamicBackPropagation;
import org.neuroph.nnet.learning.MomentumBackpropagation;
import org.neuroph.util.TransferFunctionType;


public class NeurophXOR {

    /**
     * Trains a multi-layer perceptron on the logical XOR function using
     * Neuroph's dynamic backpropagation, then prints the trained network's
     * output next to the ideal output for each of the four XOR patterns.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {

        // Create the XOR training set: 2 inputs, 1 output per pattern.
        TrainingSet trainingSet = new TrainingSet(2, 1);
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 0}, new double[]{0}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 1}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 0}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 1}, new double[]{0}));

        // 2-3-1 multi-layer perceptron with tanh activations — the classic
        // minimal topology for learning XOR.
        MultiLayerPerceptron network = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);

        // Backpropagation with a dynamically adjusted learning rate; wire the
        // rule and the network to each other before training.
        DynamicBackPropagation train = new DynamicBackPropagation();
        train.setNeuralNetwork(network);
        network.setLearningRule(train);

        // Train one epoch at a time so the error can be reported per epoch.
        // NOTE(review): there is no upper bound on epochs — if the network
        // never reaches the 0.01 error threshold this loops forever.
        int epoch = 1;
        do {
            train.doOneLearningIteration(trainingSet);
            System.out.println("Epoch " + epoch + ", error=" + train.getTotalNetworkError());
            epoch++;
        } while (train.getTotalNetworkError() > 0.01);

        System.out.println("Neural Network Results:");

        // Evaluate the trained network on every training pattern.
        for (TrainingElement element : trainingSet.trainingElements()) {
            network.setInput(element.getInput());
            network.calculate();
            Vector<Double> output = network.getOutput();
            SupervisedTrainingElement ste = (SupervisedTrainingElement) element;

            // BUG FIX: the original printed get(0) twice, so the second XOR
            // input column always duplicated the first; it must be get(1).
            System.out.println(element.getInput().get(0) + "," + element.getInput().get(1)
                    + ", actual=" + output.get(0) + ",ideal=" + ste.getDesiredOutput().get(0));
        }
    }
}

By viewing downloads associated with this article you agree to the Terms of Service and the article's licence.

If a file you wish to view isn't highlighted, and is a text file (not binary), please let us know and we'll add colourisation support for it.

License

This article, along with any associated source code and files, is licensed under The GNU Lesser General Public License (LGPLv3)


Written By
Other Rutgers University
United States United States
Hello, I am a student at Rutgers University. I am in computer science and am learning about machine learning and AI.

Comments and Discussions