weka.classifiers.mi.MIEMDD (weka-stable)
The Waikato Environment for Knowledge Analysis (WEKA), a machine
learning workbench. This is the stable version. Apart from bugfixes, this version
does not receive any other updates.
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* MIEMDD.java
* Copyright (C) 2005 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.mi;
import weka.classifiers.RandomizableClassifier;
import weka.core.Capabilities;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.MultiInstanceCapabilitiesHandler;
import weka.core.Optimization;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Normalize;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
import weka.filters.unsupervised.attribute.Standardize;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
/**
* EMDD model builds heavily upon Dietterich's Diverse Density (DD) algorithm.
* It is a general framework for MI learning that converts the MI problem to a
* single-instance setting using EM. In this implementation, we use the
* most-likely-cause DD model and only use three randomly selected positive
* bags as initial starting points of EM.
*
* For more information see:
*
* Qi Zhang, Sally A. Goldman: EM-DD: An Improved Multiple-Instance Learning Technique. In: Advances in Neural Information Processing Systems 14, 1073-1080, 2001.
*
*
* BibTeX:
*
* @inproceedings{Zhang2001,
* author = {Qi Zhang and Sally A. Goldman},
* booktitle = {Advances in Neural Information Processing Systems 14},
* pages = {1073-1080},
* publisher = {MIT Press},
* title = {EM-DD: An Improved Multiple-Instance Learning Technique},
* year = {2001}
* }
*
*
*
* Valid options are:
*
* -N <num>
* Whether to 0=normalize/1=standardize/2=neither.
* (default 1=standardize)
*
* -S <num>
* Random number seed.
* (default 1)
*
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
*
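*
* A minimal usage sketch through the WEKA API (the dataset path below is a
* placeholder; any dataset in WEKA's multi-instance format, with a nominal
* bag-id attribute, a relational bag attribute and a class attribute, will do):
*
* <pre>
* Instances data = weka.core.converters.ConverterUtils.DataSource.read("musk1.arff");
* data.setClassIndex(data.numAttributes() - 1);
*
* MIEMDD classifier = new MIEMDD();
* classifier.setOptions(new String[]{"-N", "1", "-S", "1"}); // standardize, seed 1
*
* weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
* eval.crossValidateModel(classifier, data, 10, new java.util.Random(1));
* System.out.println(eval.toSummaryString());
* </pre>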
*
* @author Eibe Frank ([email protected])
* @author Lin Dong ([email protected])
* @version $Revision: 9144 $
*/
public class MIEMDD
extends RandomizableClassifier
implements OptionHandler, MultiInstanceCapabilitiesHandler,
TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = 3899547154866223734L;
/** The index of the class attribute */
protected int m_ClassIndex;
/** The parameters (feature point and per-feature scale values) of the learned target concept */
protected double[] m_Par;
/** The number of class labels */
protected int m_NumClasses;
/** Class labels for each bag */
protected int[] m_Classes;
/** MI data */
protected double[][][] m_Data;
/** All attribute names */
protected Instances m_Attributes;
/** The EM data: the single most-likely-cause instance currently selected from each bag */
protected double[][] m_emData;
/** The filter used to standardize/normalize all values. */
protected Filter m_Filter = null;
/** Whether to normalize/standardize/neither, default:standardize */
protected int m_filterType = FILTER_STANDARDIZE;
/** Normalize training data */
public static final int FILTER_NORMALIZE = 0;
/** Standardize training data */
public static final int FILTER_STANDARDIZE = 1;
/** No normalization/standardization */
public static final int FILTER_NONE = 2;
/** The filter to apply to the training data */
public static final Tag[] TAGS_FILTER = {
new Tag(FILTER_NORMALIZE, "Normalize training data"),
new Tag(FILTER_STANDARDIZE, "Standardize training data"),
new Tag(FILTER_NONE, "No normalization/standardization"),
};
/** The filter used to get rid of missing values. */
protected ReplaceMissingValues m_Missing = new ReplaceMissingValues();
/**
* Returns a string describing this classifier
*
* @return a description of the classifier suitable for
* displaying in the explorer/experimenter gui
*/
public String globalInfo() {
return
"EMDD model builds heavily upon Dietterich's Diverse Density (DD) "
+ "algorithm.\nIt is a general framework for MI learning of converting "
+ "the MI problem to a single-instance setting using EM. In this "
+ "implementation, we use most-likely cause DD model and only use 3 "
+ "random selected postive bags as initial starting points of EM.\n\n"
+ "For more information see:\n\n"
+ getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing
* detailed information about the technical background of this class,
* e.g., paper reference or book this class is based on.
*
* @return the technical information about this class
*/
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.INPROCEEDINGS);
result.setValue(Field.AUTHOR, "Qi Zhang and Sally A. Goldman");
result.setValue(Field.TITLE, "EM-DD: An Improved Multiple-Instance Learning Technique");
result.setValue(Field.BOOKTITLE, "Advances in Neural Information Processing Systems 14");
result.setValue(Field.YEAR, "2001");
result.setValue(Field.PAGES, "1073-1080");
result.setValue(Field.PUBLISHER, "MIT Press");
return result;
}
/**
* Returns an enumeration describing the available options
*
* @return an enumeration of all the available options
*/
public Enumeration listOptions() {
Vector result = new Vector();
result.addElement(new Option(
"\tWhether to 0=normalize/1=standardize/2=neither.\n"
+ "\t(default 1=standardize)",
"N", 1, "-N "));
Enumeration enm = super.listOptions();
while (enm.hasMoreElements())
result.addElement(enm.nextElement());
return result.elements();
}
/**
* Parses a given list of options.
*
* Valid options are:
*
* -N <num>
* Whether to 0=normalize/1=standardize/2=neither.
* (default 1=standardize)
*
* -S <num>
* Random number seed.
* (default 1)
*
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
*
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
public void setOptions(String[] options) throws Exception {
String tmpStr;
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER));
} else {
setFilterType(new SelectedTag(FILTER_STANDARDIZE, TAGS_FILTER));
}
super.setOptions(options);
}
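/*
 * Command-line sketch (the dataset name is a placeholder; assumes the usual
 * WEKA conventions for running a classifier from the shell, with -t naming
 * the training file):
 *
 *   java weka.classifiers.mi.MIEMDD -t musk1.arff -N 1 -S 1
 */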
/**
* Gets the current settings of the classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
public String[] getOptions() {
Vector result;
String[] options;
int i;
result = new Vector();
options = super.getOptions();
for (i = 0; i < options.length; i++)
result.add(options[i]);
result.add("-N");
result.add("" + m_filterType);
return (String[]) result.toArray(new String[result.size()]);
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for
* displaying in the explorer/experimenter gui
*/
public String filterTypeTipText() {
return "The filter type for transforming the training data.";
}
/**
* Gets how the training data will be transformed. Will be one of
* FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
*
* @return the filtering mode
*/
public SelectedTag getFilterType() {
return new SelectedTag(m_filterType, TAGS_FILTER);
}
/**
* Sets how the training data will be transformed. Should be one of
* FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
*
* @param newType the new filtering mode
*/
public void setFilterType(SelectedTag newType) {
if (newType.getTags() == TAGS_FILTER) {
m_filterType = newType.getSelectedTag().getID();
}
}
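/*
 * Example (a sketch, equivalent to the command-line option "-N 2"):
 *
 *   MIEMDD cls = new MIEMDD();
 *   cls.setFilterType(new SelectedTag(MIEMDD.FILTER_NONE, MIEMDD.TAGS_FILTER));
 */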
private class OptEng
extends Optimization {
/**
* Evaluate objective function
* @param x the current values of variables
* @return the value of the objective function
*/
protected double objectiveFunction(double[] x){
      double nll = 0; // -LogLikelihood
      final double zero = 1e-6; // floor to keep the log finite
      for (int i = 0; i < m_Classes.length; i++) { // ith bag
        double ins = 0.0;
        for (int k = 0; k < m_emData[i].length; k++) // kth attribute
          ins += (m_emData[i][k] - x[k * 2]) * (m_emData[i][k] - x[k * 2])
            * x[k * 2 + 1] * x[k * 2 + 1];
        ins = Math.exp(-ins); // Pr. of being positive

        if (m_Classes[i] == 1) { // positive bag
          if (ins <= zero) ins = zero;
          nll -= Math.log(ins); // bag-level -LogLikelihood
        } else { // negative bag
          ins = 1.0 - ins; // Pr. of being negative
          if (ins <= zero) ins = zero;
          nll -= Math.log(ins);
        }
      }
      return nll;
    }

    /**
     * Evaluate Jacobian vector
     * @param x the current values of variables
     * @return the gradient vector
     */
    protected double[] evaluateGradient(double[] x) {
      double[] grad = new double[x.length];
      for (int i = 0; i < m_Classes.length; i++) { // ith bag
        double[] numrt = new double[x.length];
        double exp = 0.0;
        for (int k = 0; k < m_emData[i].length; k++) // kth attribute
          exp += (m_emData[i][k] - x[k * 2]) * (m_emData[i][k] - x[k * 2])
            * x[k * 2 + 1] * x[k * 2 + 1];
        exp = Math.exp(-exp); // Pr. of being positive

        // partial derivatives of the exponent w.r.t. point and scale
        for (int p = 0; p < m_emData[i].length; p++) {
          numrt[2 * p] = 2.0 * (x[2 * p] - m_emData[i][p])
            * x[2 * p + 1] * x[2 * p + 1];
          numrt[2 * p + 1] = 2.0 * (x[2 * p] - m_emData[i][p])
            * (x[2 * p] - m_emData[i][p]) * x[2 * p + 1];
        }

        for (int q = 0; q < m_emData[i].length; q++) {
          if (m_Classes[i] == 1) { // positive bag
            grad[2 * q] += numrt[2 * q];
            grad[2 * q + 1] += numrt[2 * q + 1];
          } else { // negative bag
            if (exp >= 1.0) exp = 1.0 - 1e-6; // guard the 1-exp denominator
            grad[2 * q] -= numrt[2 * q] * exp / (1.0 - exp);
            grad[2 * q + 1] -= numrt[2 * q + 1] * exp / (1.0 - exp);
          }
        }
      }
      return grad;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 9144 $");
    }
  }

  /**
   * Builds the classifier.
   *
   * @param train the training bags (multi-instance format)
   * @throws Exception if the classifier can't be built successfully
   */
  public void buildClassifier(Instances train) throws Exception {
    // ... the data preparation that precedes the EM loop (capability checks,
    // missing-value replacement via m_Missing, the normalize/standardize
    // filter selected by -N, unpacking the relational bags into m_Data,
    // m_emData and m_Classes, and picking the EM starting points from three
    // randomly selected positive bags) is not recoverable from this listing ...
    OptEng opt;
    double[] x = null, tmp = null, pre_x = null; // current/next/previous hypothesis
    double[][] b = null;                         // optimization bounds, set up with x
    double[] best_hypothesis = null;             // hypothesis with minimum training error
    double nll, pre_nll, bestnll = Double.MAX_VALUE;
    int iterationCount;
    int min_error = Integer.MAX_VALUE;

    for (int exIdx = 0; exIdx < 3; exIdx++) { // one EM run per starting point
      // ... x (and the bounds b) are initialized here from an instance of
      // the exIdx-th selected positive bag ...
      pre_nll = Double.MAX_VALUE;
      nll = Double.MAX_VALUE / 2.0;
      iterationCount = 0;
      //while (Math.abs(nll-pre_nll)>0.01*pre_nll && iterationCount<10) { //stop condition
while (nll < pre_nll && iterationCount < 10) {
iterationCount++;
pre_nll = nll;
if (m_Debug)
System.out.println("\niteration: "+iterationCount);
//E-step (find one instance from each bag with max likelihood )
for (int i = 0; i < m_Data.length; i++) { //for each bag
int insIndex = findInstance(i, x);
for (int att = 0; att < m_Data[0].length; att++) //for each attribute
m_emData[i][att] = m_Data[i][att][insIndex];
}
if (m_Debug)
System.out.println("E-step for new H' finished");
//M-step
opt = new OptEng();
tmp = opt.findArgmin(x, b);
while (tmp == null) {
tmp = opt.getVarbValues();
if (m_Debug)
System.out.println("200 iterations finished, not enough!");
tmp = opt.findArgmin(tmp, b);
}
nll = opt.getMinFunction();
pre_x = x;
x = tmp; // update hypothesis
//keep track of the best target point, which has the minimum nll
/* if (nll < bestnll) {
bestnll = nll;
m_Par = tmp;
if (m_Debug)
System.out.println("!!!!!!!!!!!!!!!!Smaller NLL found: " + nll);
}*/
//if (m_Debug)
//System.out.println(exIdx+" "+p+": "+nll+" "+pre_nll+" " +bestnll);
} //converged for one instance
//evaluate the hypothesis on the training data and
//keep track of the hypothesis with minimum error on the training data
double distribution[] = new double[2];
int error = 0;
if (nll > pre_nll)
m_Par = pre_x;
else
m_Par = x;
for (int i = 0; i < train.numInstances(); i++) {
  distribution = distributionForInstance(train.instance(i));
  if (distribution[1] >= 0.5 && m_Classes[i] == 0)
error++;
else if (distribution[1]<0.5 && m_Classes[i] == 1)
error++;
}
if (error < min_error) {
best_hypothesis = m_Par;
min_error = error;
if (nll > pre_nll)
bestnll = pre_nll;
else
bestnll = nll;
if (m_Debug)
System.out.println("error= "+ error +" nll= " + bestnll);
}
}
if (m_Debug) {
System.out.println(exIdx+ ": ---------------------------");
System.out.println("current minimum error= "+min_error+" nll= "+bestnll);
}
}
m_Par = best_hypothesis;
}
/**
* Given x, finds the instance in the ith bag that is most likely to be
* responsible for the label of the bag: for a positive bag, the instance
* with the maximal probability of being positive; for a negative bag, the
* instance with the minimal probability of being negative.
*
* @param i the bag index
* @param x the current values of variables
* @return index of the instance in the bag
*/
protected int findInstance(int i, double[] x){
double min=Double.MAX_VALUE;
int insIndex=0;
int nI = m_Data[i][0].length; // numInstances in ith bag
    for (int j = 0; j < nI; j++) { // for each instance in the bag
      double ins = 0.0;
      for (int k = 0; k < m_Data[i].length; k++) // for each attribute
        ins += (m_Data[i][k][j] - x[k * 2]) * (m_Data[i][k][j] - x[k * 2])
          * x[k * 2 + 1] * x[k * 2 + 1];
      // Pr. of being positive is Math.exp(-ins), so maximizing the
      // probability is equivalent to minimizing ins
      if (ins < min) {
        min = ins;
        insIndex = j;
      }
    }
    return insIndex;
  }
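The listing is truncated at this point. For orientation, here is a sketch of
how a bag is scored under the learned point m_Par in the most-likely-cause
model, mirroring what the omitted distributionForInstance() must compute (a
reconstruction for illustration, not the verbatim weka-stable source):

  public double[] distributionForInstance(Instance exmp) throws Exception {
    // extract and filter the instances inside the bag
    Instances ins = exmp.relationalValue(1);
    if (m_Filter != null)
      ins = Filter.useFilter(ins, m_Filter);
    ins = Filter.useFilter(ins, m_Missing);

    // the bag's probability of being positive is that of its
    // most-likely-cause instance: max_j exp(-sum_k (a_jk - p_k)^2 * s_k^2)
    double maxProb = 0.0;
    for (int j = 0; j < ins.numInstances(); j++) {
      double sum = 0.0;
      for (int k = 0; k < ins.numAttributes(); k++)
        sum += (ins.instance(j).value(k) - m_Par[k * 2])
          * (ins.instance(j).value(k) - m_Par[k * 2])
          * m_Par[k * 2 + 1] * m_Par[k * 2 + 1];
      maxProb = Math.max(maxProb, Math.exp(-sum));
    }

    double[] distribution = new double[2];
    distribution[1] = maxProb;          // Pr(positive)
    distribution[0] = 1.0 - maxProb;    // Pr(negative)
    return distribution;
  }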