weka.classifiers.neural.singlelayerperceptron.algorithm.PerceptronAlgorithm Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of wekaclassalgos Show documentation
Show all versions of wekaclassalgos Show documentation
Fork of the following defunct sourceforge.net project: https://sourceforge.net/projects/wekaclassalgos/
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package weka.classifiers.neural.singlelayerperceptron.algorithm;
import weka.classifiers.neural.common.RandomWrapper;
import weka.classifiers.neural.common.SimpleNeuron;
import weka.classifiers.neural.common.learning.LearningRateKernel;
import weka.classifiers.neural.common.transfer.TransferFunction;
import weka.core.Instance;
import weka.core.Instances;
/**
* Title: Weka Neural Implementation
* Description: ...
* Copyright: Copyright (c) 2003
* Company: N/A
*
* @author Jason Brownlee
* @version 1.0
*/
public class PerceptronAlgorithm extends SLPAlgorithmAncestor {

  /**
   * Constructs a perceptron training algorithm, delegating all state
   * initialisation to the single-layer-perceptron ancestor.
   *
   * @param aTransfer         transfer (activation) function for the neurons
   * @param aBiasInput        constant input value fed to each neuron's bias weight
   * @param aRand             random number source used for weight initialisation
   * @param aKernel           kernel providing the (possibly decaying) learning rate
   * @param trainingInstances dataset used to shape the model
   */
  public PerceptronAlgorithm(TransferFunction aTransfer,
                             double aBiasInput,
                             RandomWrapper aRand,
                             LearningRateKernel aKernel,
                             Instances trainingInstances) {
    super(aTransfer, aBiasInput, aRand, aKernel, trainingInstances);
  }

  /**
   * Applies the perceptron learning rule to a single neuron for one instance:
   * {@code delta = learningRate * (target - output) * input}.
   *
   * @param instance      training instance supplying the input values
   * @param neuron        neuron whose weights are adjusted in place
   * @param expected      target (desired) output for this instance
   * @param aLearningRate learning rate to scale each weight adjustment
   */
  protected void calculateWeightErrors(Instance instance,
                                       SimpleNeuron neuron,
                                       double expected,
                                       double aLearningRate) {
    // current response of the neuron to this instance
    double output = transfer(activate(neuron, instance));

    // common factor of every weight delta: learningRate * (target - output)
    double scaledError = aLearningRate * (expected - output);

    double[] weights = neuron.getWeights();
    int weightIndex = 0;

    for (int attr = 0; attr < instance.numAttributes(); attr++) {
      // the class attribute has no weight associated with it
      if (attr == instance.classIndex()) {
        continue;
      }
      // a missing value contributed nothing to the activation, so its weight
      // is left untouched — but its weight slot must still be stepped over
      if (!instance.isMissing(attr)) {
        weights[weightIndex] += scaledError * instance.value(attr);
      }
      weightIndex++;
    }

    // the bias weight is trained against the neuron's constant bias input
    weights[neuron.getBiasIndex()] += scaledError * neuron.getBiasInputValue();
  }
}
© 2015 - 2024 Weber Informatics LLC | Privacy Policy