/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RegressionByDiscretization.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.meta;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.classifiers.ConditionalDensityEstimator;
import weka.classifiers.IntervalEstimator;
import weka.classifiers.SingleClassifierEnhancer;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.Utils;
import weka.estimators.UnivariateDensityEstimator;
import weka.estimators.UnivariateEqualFrequencyHistogramEstimator;
import weka.estimators.UnivariateIntervalEstimator;
import weka.estimators.UnivariateQuantileEstimator;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Discretize;
/**
* A regression scheme that employs any classifier on a copy of the data that has the class attribute (equal-width) discretized. The predicted value is the expected value of the mean class value for each discretized interval (based on the predicted probabilities for each interval).
*
*
* Valid options are:
*
* -B <int>
* Number of bins for equal-width discretization
* (default 10).
*
*
* -E
* Whether to delete empty bins after discretization
* (default false).
*
*
* -F
* Use equal-frequency instead of equal-width discretization.
*
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
*
* -W
* Full name of base classifier.
* (default: weka.classifiers.trees.J48)
*
*
* Options specific to classifier weka.classifiers.trees.J48:
*
*
* -U
* Use unpruned tree.
*
* -C <pruning confidence>
* Set confidence threshold for pruning.
* (default 0.25)
*
* -M <minimum number of instances>
* Set minimum number of instances per leaf.
* (default 2)
*
* -R
* Use reduced error pruning.
*
* -N <number of folds>
* Set number of folds for reduced error
* pruning. One fold is used as pruning set.
* (default 3)
*
* -B
* Use binary splits only.
*
* -S
* Don't perform subtree raising.
*
* -L
* Do not clean up after the tree has been built.
*
* -A
* Laplace smoothing for predicted probabilities.
*
* -Q <seed>
* Seed for random data shuffling (default 1).
*
*
* @author Len Trigg ([email protected])
* @author Eibe Frank ([email protected])
* @version $Revision: 15519 $
*/
public class RegressionByDiscretization
extends SingleClassifierEnhancer implements IntervalEstimator, ConditionalDensityEstimator {
/** for serialization */
static final long serialVersionUID = 5066426153134050378L;
/** The discretization filter. */
protected Discretize m_Discretizer = new Discretize();
/** The number of discretization intervals. */
protected int m_NumBins = 10;
/** The mean values for each Discretized class interval. */
protected double [] m_ClassMeans;
/** The class counts for each Discretized class interval. */
protected int [] m_ClassCounts;
/** Whether to delete empty intervals. */
protected boolean m_DeleteEmptyBins;
/** Mapping to convert indices in case empty bins are deleted. */
protected int[] m_OldIndexToNewIndex;
/** Header of discretized data. */
protected Instances m_DiscretizedHeader = null;
/** Use equal-frequency binning */
protected boolean m_UseEqualFrequency = false;
/** Whether to minimize absolute error, rather than squared error. */
protected boolean m_MinimizeAbsoluteError = false;
/** Which estimator to use (default: histogram) */
protected UnivariateDensityEstimator m_Estimator = new UnivariateEqualFrequencyHistogramEstimator();
/** The original target values in the training data */
protected double[] m_OriginalTargetValues = null;
/** The converted target values in the training data */
protected int[] m_NewTargetValues = null;
/**
* Returns a string describing classifier
* @return a description suitable for
* displaying in the explorer/experimenter gui
*/
public String globalInfo() {
return "A regression scheme that employs any "
+ "classifier on a copy of the data that has the class attribute "
+ "discretized. The predicted value is the expected value of the "
+ "mean class value for each discretized interval (based on the "
+ "predicted probabilities for each interval). This class now "
+ "also supports conditional density estimation by building "
+ "a univariate density estimator from the target values in "
+ "the training data, weighted by the class probabilities. \n\n"
+ "For more information on this process, see\n\n"
+ getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing
* detailed information about the technical background of this class,
* e.g., paper reference or book this class is based on.
*
* @return the technical information about this class
*/
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.INPROCEEDINGS);
result.setValue(Field.AUTHOR, "Eibe Frank and Remco R. Bouckaert");
result.setValue(Field.TITLE, "Conditional Density Estimation with Class Probability Estimators");
result.setValue(Field.BOOKTITLE, "First Asian Conference on Machine Learning");
result.setValue(Field.YEAR, "2009");
result.setValue(Field.PAGES, "65-81");
result.setValue(Field.PUBLISHER, "Springer Verlag");
result.setValue(Field.ADDRESS, "Berlin");
return result;
}
/**
* String describing default classifier.
*
* @return the default classifier classname
*/
protected String defaultClassifierString() {
return "weka.classifiers.trees.J48";
}
/**
* Default constructor.
*/
public RegressionByDiscretization() {
m_Classifier = new weka.classifiers.trees.J48();
}
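/*
 * A minimal usage sketch (not part of the original file): the classifier is
 * built like any other WEKA classifier on data with a numeric class. The
 * file name "cpu.arff" is a placeholder for any local ARFF file whose last
 * attribute is the numeric target.
 *
 *   Instances data = new Instances(
 *       new java.io.BufferedReader(new java.io.FileReader("cpu.arff")));
 *   data.setClassIndex(data.numAttributes() - 1);
 *   RegressionByDiscretization rbd = new RegressionByDiscretization();
 *   rbd.setNumBins(10);                    // equivalent to -B 10
 *   rbd.buildClassifier(data);
 *   double prediction = rbd.classifyInstance(data.instance(0));
 */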
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
// class
result.disableAllClasses();
result.disableAllClassDependencies();
result.enable(Capability.NUMERIC_CLASS);
result.enable(Capability.DATE_CLASS);
result.setMinimumNumberInstances(2);
return result;
}
/**
* Generates the classifier.
*
* @param instances set of instances serving as training data
* @throws Exception if the classifier has not been generated successfully
*/
public void buildClassifier(Instances instances) throws Exception {
// can classifier handle the data?
getCapabilities().testWithFail(instances);
// remove instances with missing class
instances = new Instances(instances);
instances.deleteWithMissingClass();
// Discretize the training data
m_Discretizer.setIgnoreClass(true);
m_Discretizer.setAttributeIndices("" + (instances.classIndex() + 1));
m_Discretizer.setBins(getNumBins());
m_Discretizer.setUseEqualFrequency(getUseEqualFrequency());
m_Discretizer.setInputFormat(instances);
Instances newTrain = Filter.useFilter(instances, m_Discretizer);
// Should empty bins be deleted?
m_OldIndexToNewIndex = null;
if (m_DeleteEmptyBins) {
// Figure out which classes are empty after discretization
int numNonEmptyClasses = 0;
boolean[] notEmptyClass = new boolean[newTrain.numClasses()];
for (int i = 0; i < newTrain.numInstances(); i++) {
if (!notEmptyClass[(int)newTrain.instance(i).classValue()]) {
numNonEmptyClasses++;
notEmptyClass[(int)newTrain.instance(i).classValue()] = true;
}
}
// Compute new list of non-empty classes and mapping of indices
ArrayList<String> newClassVals = new ArrayList<String>(numNonEmptyClasses);
m_OldIndexToNewIndex = new int[newTrain.numClasses()];
for (int i = 0; i < newTrain.numClasses(); i++) {
if (notEmptyClass[i]) {
m_OldIndexToNewIndex[i] = newClassVals.size();
newClassVals.add(newTrain.classAttribute().value(i));
}
}
// Compute new header information
Attribute newClass = new Attribute(newTrain.classAttribute().name(),
newClassVals);
ArrayList<Attribute> newAttributes = new ArrayList<Attribute>(newTrain.numAttributes());
for (int i = 0; i < newTrain.numAttributes(); i++) {
if (i != newTrain.classIndex()) {
newAttributes.add((Attribute)newTrain.attribute(i).copy());
} else {
newAttributes.add(newClass);
}
}
// Create new header and modify instances
Instances newTrainTransformed = new Instances(newTrain.relationName(),
newAttributes,
newTrain.numInstances());
newTrainTransformed.setClassIndex(newTrain.classIndex());
for (int i = 0; i < newTrain.numInstances(); i++) {
Instance inst = newTrain.instance(i);
newTrainTransformed.add(inst);
newTrainTransformed.lastInstance().
setClassValue(m_OldIndexToNewIndex[(int)inst.classValue()]);
}
newTrain = newTrainTransformed;
}
// Store target values, in case a prediction interval or computation of median is required
m_OriginalTargetValues = new double[instances.numInstances()];
m_NewTargetValues = new int[instances.numInstances()];
for (int i = 0; i < m_OriginalTargetValues.length; i++) {
m_OriginalTargetValues[i] = instances.instance(i).classValue();
m_NewTargetValues[i] = (int)newTrain.instance(i).classValue();
}
m_DiscretizedHeader = new Instances(newTrain, 0);
int numClasses = newTrain.numClasses();
// Calculate the mean value for each bin of the new class attribute
m_ClassMeans = new double [numClasses];
m_ClassCounts = new int [numClasses];
for (int i = 0; i < instances.numInstances(); i++) {
Instance inst = newTrain.instance(i);
if (!inst.classIsMissing()) {
int classVal = (int) inst.classValue();
m_ClassCounts[classVal]++;
m_ClassMeans[classVal] += instances.instance(i).classValue();
}
}
for (int i = 0; i < numClasses; i++) {
if (m_ClassCounts[i] > 0) {
m_ClassMeans[i] /= m_ClassCounts[i];
}
}
if (m_Debug) {
System.out.println("Bin Means");
System.out.println("==========");
for (int i = 0; i < m_ClassMeans.length; i++) {
System.out.println(m_ClassMeans[i]);
}
System.out.println();
}
// Train the sub-classifier
m_Classifier.buildClassifier(newTrain);
}
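/*
 * Worked example of the bin-mean computation above (illustrative numbers
 * only): with 3 bins and training targets {1.2, 1.4, 5.0} discretized into
 * bins {0, 0, 2}, m_ClassCounts becomes {2, 0, 1} and m_ClassMeans becomes
 * {1.3, 0.0, 5.0}. Bin 1 is empty and would have been removed before
 * training the base classifier if m_DeleteEmptyBins were set.
 */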
/**
* Get density estimator for given instance.
*
* @param instance the instance
* @param print whether debugging output is to be printed
* @return the univariate density estimator
* @exception Exception if the estimator can't be computed
*/
protected UnivariateDensityEstimator getDensityEstimator(Instance instance, boolean print) throws Exception {
// Initialize estimator
UnivariateDensityEstimator e = (UnivariateDensityEstimator) new SerializedObject(m_Estimator).getObject();
if (e instanceof UnivariateEqualFrequencyHistogramEstimator) {
// Set the number of bins appropriately
((UnivariateEqualFrequencyHistogramEstimator)e).setNumBins(getNumBins());
// Initialize boundaries of equal frequency estimator
for (int i = 0; i < m_OriginalTargetValues.length; i++) {
e.addValue(m_OriginalTargetValues[i], 1.0);
}
// Construct estimator, then initialize statistics, so that only boundaries will be kept
((UnivariateEqualFrequencyHistogramEstimator)e).initializeStatistics();
// Now that boundaries have been determined, we only need to update the bin weights
((UnivariateEqualFrequencyHistogramEstimator)e).setUpdateWeightsOnly(true);
}
// Make sure structure of class attribute correct
m_Discretizer.input(instance);
m_Discretizer.batchFinished();
Instance newInstance = m_Discretizer.output();
if (m_OldIndexToNewIndex != null) {
newInstance.setClassValue(m_OldIndexToNewIndex[(int)newInstance.classValue()]);
}
newInstance.setDataset(m_DiscretizedHeader);
double [] probs = m_Classifier.distributionForInstance(newInstance);
// Add values to estimator
for (int i = 0; i < m_OriginalTargetValues.length; i++) {
e.addValue(m_OriginalTargetValues[i], probs[m_NewTargetValues[i]] *
m_OriginalTargetValues.length / m_ClassCounts[m_NewTargetValues[i]]);
}
// Return estimator
return e;
}
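/*
 * Illustration of the weighting above (assumed numbers): with N = 100
 * training targets, a target whose bin receives predicted probability 0.5
 * and contains 25 training instances is added to the estimator with weight
 * 0.5 * 100 / 25 = 2.0. Summed over a bin's targets, the total weight is
 * proportional to that bin's predicted class probability.
 */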
/**
* Returns an N * 2 array, where N is the number of prediction
* intervals. In each row, the first element contains the lower
* boundary of the corresponding prediction interval and the second
* element the upper boundary.
*
* @param instance the instance to make the prediction for.
* @param confidenceLevel the percentage of cases that the interval should cover.
* @return an array of prediction intervals
* @exception Exception if the intervals can't be computed
*/
public double[][] predictIntervals(Instance instance, double confidenceLevel) throws Exception {
// Get density estimator
UnivariateIntervalEstimator e = (UnivariateIntervalEstimator)getDensityEstimator(instance, false);
// Return intervals
return e.predictIntervals(confidenceLevel);
}
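/*
 * Usage sketch (assumes "rbd" has been built as in the sketch above, and
 * that the chosen density estimator implements UnivariateIntervalEstimator,
 * as the default histogram estimator does):
 *
 *   double[][] intervals = rbd.predictIntervals(testInstance, 0.95);
 *   System.out.println("95% interval: [" + intervals[0][0] + ", "
 *       + intervals[0][1] + "]");
 */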
/**
* Returns natural logarithm of density estimate for given value based on given instance.
*
* @param instance the instance to make the prediction for.
* @param value the value to make the prediction for.
* @return the natural logarithm of the density estimate
* @exception Exception if the intervals can't be computed
*/
public double logDensity(Instance instance, double value) throws Exception {
// Get density estimator
UnivariateDensityEstimator e = getDensityEstimator(instance, true);
// Return estimate
return e.logDensity(value);
}
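/*
 * Usage sketch: the log-density can be used to score how well the
 * conditional density fits an observed target value, e.g.
 *
 *   double logDens = rbd.logDensity(testInstance, 42.0);
 */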
/**
* Returns a predicted class for the test instance.
*
* @param instance the instance to be classified
* @return predicted class value
* @throws Exception if the prediction couldn't be made
*/
public double classifyInstance(Instance instance) throws Exception {
// Make sure structure of class attribute correct
m_Discretizer.input(instance);
m_Discretizer.batchFinished();
Instance newInstance = m_Discretizer.output();
if (m_OldIndexToNewIndex != null) {
newInstance.setClassValue(m_OldIndexToNewIndex[(int)newInstance.classValue()]);
}
newInstance.setDataset(m_DiscretizedHeader);
double [] probs = m_Classifier.distributionForInstance(newInstance);
if (!m_MinimizeAbsoluteError) {
// Compute actual prediction
double prediction = 0, probSum = 0;
for (int j = 0; j < probs.length; j++) {
prediction += probs[j] * m_ClassMeans[j];
probSum += probs[j];
}
return prediction / probSum;
} else {
// Get density estimator
UnivariateQuantileEstimator e = (UnivariateQuantileEstimator)getDensityEstimator(instance, true);
// Return estimate
return e.predictQuantile(0.5);
}
}
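/*
 * Worked example of the expected-value computation above (illustrative
 * numbers only): with class probabilities {0.2, 0.5, 0.3} and bin means
 * {1.0, 2.0, 3.0}, the prediction is
 * (0.2*1.0 + 0.5*2.0 + 0.3*3.0) / 1.0 = 2.1.
 */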
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
public Enumeration<Option> listOptions() {

// NOTE: the remainder of the original file is truncated in this copy. The
// code below reconstructs listOptions() from the options documented in the
// class Javadoc (-B, -E, -F), plus the accessors referenced in
// buildClassifier() above; treat it as a sketch, not a verbatim copy.
Vector<Option> newVector = new Vector<Option>();

newVector.addElement(new Option(
          "\tNumber of bins for equal-width discretization\n"
          + "\t(default 10).\n",
          "B", 1, "-B <int>"));

newVector.addElement(new Option(
          "\tWhether to delete empty bins after discretization\n"
          + "\t(default false).\n",
          "E", 0, "-E"));

newVector.addElement(new Option(
          "\tUse equal-frequency instead of equal-width discretization.",
          "F", 0, "-F"));

newVector.addAll(Collections.list(super.listOptions()));

return newVector.elements();
}
/** @return the number of bins used for discretizing the class attribute */
public int getNumBins() {
return m_NumBins;
}
/** @param numBins the number of bins to use for discretization */
public void setNumBins(int numBins) {
m_NumBins = numBins;
}
/** @return true if equal-frequency binning is used instead of equal-width */
public boolean getUseEqualFrequency() {
return m_UseEqualFrequency;
}
/** @param b true if equal-frequency binning should be used */
public void setUseEqualFrequency(boolean b) {
m_UseEqualFrequency = b;
}
}