package hex.kmeans;
import hex.*;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.*;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
/**
* Scalable K-Means++ (KMeans||)
* http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf
* http://www.youtube.com/watch?v=cigXAxV3XcY
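*
* A minimal usage sketch (assumes a Frame already parsed into the K/V store;
* the key name "train.hex" is a placeholder):
* <pre>
* KMeansModel.KMeansParameters parms = new KMeansModel.KMeansParameters();
* parms._train = Key.make("train.hex");
* parms._k = 3;
* parms._max_iterations = 10;
* KMeansModel model = new KMeans(parms).trainModel().get();
* </pre>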
*/
public class KMeans extends ClusteringModelBuilder<KMeansModel,KMeansModel.KMeansParameters,KMeansModel.KMeansOutput> {
// Convergence tolerance
final private double TOLERANCE = 1e-6;
@Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Clustering }; }
public enum Initialization { Random, PlusPlus, Furthest, User }
/** Start the KMeans training Job on an F/J thread. */
@Override protected KMeansDriver trainModelImpl() { return new KMeansDriver(); }
// Called from an http request
public KMeans( KMeansModel.KMeansParameters parms ) { super(parms); init(false); }
public KMeans( KMeansModel.KMeansParameters parms, Job job) { super(parms,job); init(false); }
public KMeans(boolean startup_once) { super(new KMeansModel.KMeansParameters(),startup_once); }
@Override protected void checkMemoryFootPrint() {
long mem_usage = 8 /*doubles*/ * _parms._k * _train.numCols() * (_parms._standardize ? 2 : 1);
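// e.g. k = 100 centers over 1,000 standardized columns:
// 8 * 100 * 1000 * 2 = 1.6 MB held on the driver node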
long max_mem = H2O.SELF._heartbeat.get_free_mem();
if (mem_usage > max_mem) {
String msg = "Centroids won't fit in the driver node's memory ("
+ PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem)
+ ") - try reducing the number of columns and/or the number of categorical factors.";
error("_train", msg);
}
}
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();".
*
* Validate K, max_iterations and the number of rows. */
@Override public void init(boolean expensive) {
super.init(expensive);
if( _parms._max_iterations < 0 || _parms._max_iterations > 1e6) error("_max_iterations", "max_iterations must be between 0 and 1e6");
if( _train == null ) return;
if( _parms._init == Initialization.User && _parms._user_points == null )
error("_user_y","Must specify initial cluster centers");
if( null != _parms._user_points ){ // Check dimensions of user-specified centers
Frame user_points = _parms._user_points.get();
if( user_points.numCols() != _train.numCols() - numSpecialCols()) {
error("_user_y","The user-specified points must have the same number of columns (" + (_train.numCols() - numSpecialCols()) + ") as the training observations");
} else if( user_points.numRows() != _parms._k)
error("_user_y","The number of rows in the user-specified points is not equal to k = " + _parms._k);
}
if (expensive && error_count() == 0) checkMemoryFootPrint();
}
// ----------------------
private final class KMeansDriver extends Driver {
private String[][] _isCats; // Categorical columns
// Initialize cluster centers
double[][] initial_centers( KMeansModel model, final Vec[] vecs, final double[] means, final double[] mults, final int[] modes ) {
// Categoricals use a different distance metric than numeric columns.
model._output._categorical_column_count=0;
_isCats = new String[vecs.length][];
for( int v=0; v<vecs.length; v++ ) {
_isCats[v] = vecs[v].isCategorical() ? new String[0] : null;
if( _isCats[v] != null ) model._output._categorical_column_count++;
}
Random rand = RandomUtils.getRNG(_parms._seed - 1);
double centers[][]; // Cluster centers
if( null != _parms._user_points ) { // User-specified starting points
Frame user_points = _parms._user_points.get();
int numCenters = (int)user_points.numRows();
int numCols = user_points.numCols();
centers = new double[numCenters][numCols];
Vec[] centersVecs = user_points.vecs();
// Get the centers and standardize them if requested
for( int r = 0; r < numCenters; r++ )
for( int c = 0; c < numCols; c++ )
centers[r][c] = data(centersVecs[c].at(r), c, means, mults, modes);
} else if( _parms._init == Initialization.Random ) {
// Initialize all k cluster centers to random rows
centers = new double[_parms._k][vecs.length];
for( double[] center : centers )
randomRow(vecs, rand, center, means, mults, modes);
} else { // PlusPlus or Furthest: the KMeans|| scheme
// Initialize the first center to a random row, then repeatedly sample
// candidate centers with probability proportional to their squared
// distance from the centers chosen so far, and recluster down to k.
centers = new double[1][vecs.length];
randomRow(vecs, rand, centers[0], means, mults, modes);
while( model._output._iterations < 5 ) {
// Sum of squared distances from each row to its nearest center
SumSqr sqr = new SumSqr(centers, means, mults, modes, _isCats).doAll(vecs);
// Sample candidate centers, oversampling k by a factor of 3
Sampler sampler = new Sampler(centers, means, mults, modes, _isCats, sqr._sqr, _parms._k * 3, _parms._seed, hasWeightCol()).doAll(vecs);
centers = ArrayUtils.append(centers, sampler._sampled);
// Fill in the sampled centers into the model for progress reporting
if( _job.stop_requested() ) return null; // Stopped/cancelled
model._output._centers_raw = destandardize(centers, _isCats, means, mults);
model._output._tot_withinss = sqr._sqr / _train.numRows();
model.update(_job); // Make early version of model visible
model._output._iterations++;
}
// Recluster the oversampled candidate centers down to k
centers = recluster(centers, rand, _parms._k, _parms._init, _isCats);
model._output._iterations = 0; // Reset iteration count for the main loop
}
return centers;
}
// Number of times we have attempted to rescue an empty cluster
transient private int _reinit_attempts;
// Handle the case where some centers go dry: re-initialize the first empty
// cluster to the row with the largest error, then re-run Lloyds.
private boolean cleanupBadClusters( Lloyds task, final Vec[] vecs, final double[][] centers, final double[] means, final double[] mults, final int[] modes ) {
// Find any bad (empty) cluster
int clu;
for( clu = 0; clu < _parms._k; clu++ )
if( task._size[clu] == 0 ) break;
if( clu == _parms._k ) return false; // No bad clusters
long row = task._worst_row;
Log.warn("KMeans: Re-initializing cluster " + clu + " to row " + row);
data(centers[clu] = task._cMeans[clu], vecs, row, means, mults, modes);
task._size[clu] = 1;
// Find any MORE bad clusters; we only fixed the first one
for( clu = 0; clu < _parms._k; clu++ )
if( task._size[clu] == 0 ) break;
if( clu == _parms._k ) return false; // No MORE bad clusters
if( _reinit_attempts++ < _parms._k ) {
return true; // Rerun Lloyds with the rescued center
} else {
_reinit_attempts = 0;
return false;
}
}
// Compute all interesting KMeans stats (errors & variances of clusters)
// and fill them into the model; return the new standardized centers.
double[][] computeStatsFillModel( Lloyds task, KMeansModel model, final Vec[] vecs, final double[] means, final double[] mults, final int[] impute_cat ) {
// Fill in the model based on the original (destandardized) centers
if( model._parms._standardize )
model._output._centers_std_raw = task._cMeans;
model._output._centers_raw = destandardize(task._cMeans, _isCats, means, mults);
model._output._size = task._size;
model._output._withinss = task._cSqr;
double ssq = 0; // sum squared error
for( int i = 0; i < _parms._k; i++ )
ssq += model._output._withinss[i]; // sum squared error over all clusters
model._output._tot_withinss = ssq;
model._output._history_withinss = ArrayUtils.copyAndFillOf(
model._output._history_withinss,
model._output._history_withinss.length+1, model._output._tot_withinss);
// Sum-of-squared distance from the grand mean
if( _parms._k == 1 )
model._output._totss = model._output._tot_withinss;
else {
// If the data is standardized, the grand mean is simply the origin
TotSS totss = new TotSS(means, mults, impute_cat, _train.domains(), _train.cardinality()).doAll(vecs);
model._output._totss = totss._tss;
}
model._output._betweenss = model._output._totss - model._output._tot_withinss;
model._output._iterations++;
// Two small TwoDimTables - cheap
model._output._model_summary = createModelSummaryTable(model._output);
model._output._scoring_history = createScoringHistoryTable(model._output);
// Take the cluster stats from the model and assemble a metrics object
model._output._training_metrics = makeTrainingMetrics(model);
return task._cMeans; // New centers
}
// Stopping criteria
boolean isDone( KMeansModel model, double[][] newCenters, double[][] oldCenters ) {
if( _job.stop_requested() ) return true; // Stopped/cancelled
// Stopped for running out of iterations
if( model._output._iterations >= _parms._max_iterations) return true;
// Compute average change in standardized cluster centers
if( oldCenters==null ) return false; // No prior iteration, not stopping
double average_change = 0;
for( int clu=0; clu<_parms._k; clu++ )
average_change += hex.genmodel.GenModel.KMeans_distance(oldCenters[clu],newCenters[clu],_isCats,null,null);
average_change /= _parms._k; // Average change per cluster
model._output._avg_centroids_chg = ArrayUtils.copyAndFillOf(
model._output._avg_centroids_chg,
model._output._avg_centroids_chg.length+1, average_change);
model._output._training_time_ms = ArrayUtils.copyAndFillOf(
model._output._training_time_ms,
model._output._training_time_ms.length+1, System.currentTimeMillis());
return average_change < TOLERANCE;
}
// Main worker thread
@Override
public void compute2() {
KMeansModel model = null;
try {
Scope.enter();
init(true);
// Do lock even before checking the errors, since this block is finalized by unlock
// (not the best solution, but the code is more readable)
_parms.read_lock_frames(_job); // Fetch & read-lock input frames
// Something went wrong: quit with a ModelBuilder validation exception
if( error_count() > 0 ) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(KMeans.this);
// The model to be built
model = new KMeansModel(dest(), _parms, new KMeansModel.KMeansOutput(KMeans.this));
model.delete_and_lock(_job);
//
final Vec vecs[] = _train.vecs();
// mults & means for standardization
final double[] means = _train.means(); // means are used to impute NAs
final double[] mults = _parms._standardize ? _train.mults() : null;
final int [] impute_cat = new int[vecs.length];
for(int i = 0; i < vecs.length; i++)
impute_cat[i] = vecs[i].isNumeric() ? -1 : DataInfo.imputeCat(vecs[i]);
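// impute_cat[i] == -1 flags a numeric column; categorical NAs are imputed
// with the column's most frequent level (its mode)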
model._output._normSub = means;
model._output._normMul = mults;
// Initialize cluster centers and standardize if requested
double[][] centers = initial_centers(model,vecs,means,mults,impute_cat);
if( centers==null ) return; // Stopped/cancelled during center-finding
double[][] oldCenters = null;
// ---
// Run the main KMeans Clustering loop
// Stop after enough iterations or average_change < TOLERANCE
model._output._iterations = 0; // isDone() stops the loop once iterations >= max_iterations or the centers converge
while( !isDone(model,centers,oldCenters) ) {
Lloyds task = new Lloyds(centers,means,mults,impute_cat,_isCats, _parms._k, hasWeightCol()).doAll(vecs);
// Pick the max categorical level for cluster center
max_cats(task._cMeans,task._cats,_isCats);
// Handle the case where some centers go dry. Rescue only 1 cluster
// per iteration ('cause we only tracked the 1 worst row)
if( cleanupBadClusters(task,vecs,centers,means,mults,impute_cat) ) continue;
// Compute model stats; update standardized cluster centers
oldCenters = centers;
centers = computeStatsFillModel(task, model, vecs, means, mults, impute_cat);
model.update(_job); // Update model in K/V store
_job.update(1); // One unit of work
if (model._parms._score_each_iteration)
Log.info(model._output._model_summary);
}
Log.info(model._output._model_summary);
// Log.info(model._output._scoring_history);
// Log.info(((ModelMetricsClustering)model._output._training_metrics).createCentroidStatsTable().toString());
// At the end: validation scoring (no need to gather scoring history)
if (_valid != null) {
model.score(_parms.valid()).delete(); //this appends a ModelMetrics on the validation set
model._output._validation_metrics = ModelMetrics.getFromDKV(model,_parms.valid());
model.update(_job); // Update model in K/V store
}
} finally {
if( model != null ) model.unlock(_job);
_parms.read_unlock_frames(_job);
Scope.exit();
}
tryComplete();
}
private TwoDimTable createModelSummaryTable(KMeansModel.KMeansOutput output) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Number of Rows"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Number of Clusters"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Number of Categorical Columns"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Number of Iterations"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Within Cluster Sum of Squares"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Total Sum of Squares"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Between Cluster Sum of Squares"); colTypes.add("double"); colFormat.add("%.5f");
final int rows = 1;
TwoDimTable table = new TwoDimTable(
"Model Summary", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
int col = 0;
table.set(row, col++, Math.round(_train.numRows() * (hasWeightCol() ? _train.lastVec().mean() : 1)));
table.set(row, col++, output._centers_raw.length);
table.set(row, col++, output._categorical_column_count);
table.set(row, col++, output._iterations);
table.set(row, col++, output._tot_withinss);
table.set(row, col++, output._totss);
table.set(row, col++, output._betweenss);
return table;
}
private TwoDimTable createScoringHistoryTable(KMeansModel.KMeansOutput output) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Iteration"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Avg. Change of Std. Centroids"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Within Cluster Sum Of Squares"); colTypes.add("double"); colFormat.add("%.5f");
final int rows = output._avg_centroids_chg.length;
TwoDimTable table = new TwoDimTable(
"Scoring History", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
for( int i = 0; i < rows; i++ ) {
int col = 0;
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print(output._training_time_ms[i]));
table.set(row, col++, PrettyPrint.msecs(output._training_time_ms[i] - _job.start_time(), true));
table.set(row, col++, i);
table.set(row, col++, output._avg_centroids_chg[i]);
table.set(row, col++, output._history_withinss[i]);
row++;
}
return table;
}
}
// -------------------------------------------------------------------------
// Total sum-of-squared distance from the grand mean of the (standardized) data
private static class TotSS extends MRTask<TotSS> {
// IN
final double[] _means, _mults;
final int[] _modes;
final String[][] _isCats;
final int[] _card;
// OUT
double _tss;
double[] _gc; // Grand center (mean of cols)
TotSS(double[] means, double[] mults, int[] modes, String[][] isCats, int[] card) {
_means = means;
_mults = mults;
_modes = modes;
_tss = 0;
_isCats = isCats;
_card = card;
// Mean of numeric col is zero when standardized
_gc = mults!=null ? new double[means.length] : Arrays.copyOf(means, means.length);
for(int i=0; i<means.length; i++) {
if(isCats[i] != null)
_gc[i] = Math.min(Math.round(means[i]), _card[i]-1);
}
}
@Override public void map(Chunk[] cs) {
for( int row = 0; row < cs[0]._len; row++ ) {
double[] values = new double[cs.length];
// Fetch the row with the same NA and categorical handling used in training
data(values, cs, row, _means, _mults, _modes);
// Accumulate the squared distance from the grand center
_tss += hex.genmodel.GenModel.KMeans_distance(_gc, values, _isCats, null, null);
}
}
@Override public void reduce(TotSS other) { _tss += other._tss; }
}
// -------------------------------------------------------------------------
// Initial sum-of-square-distance to the nearest cluster center
private static class SumSqr extends MRTask<SumSqr> {
// IN
double[][] _centers;
double[] _means, _mults; // Standardization
int[] _modes; // Imputation of missing categoricals
final String[][] _isCats;
// OUT
double _sqr;
SumSqr( double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats ) {
_centers = centers;
_means = means;
_mults = mults;
_modes = modes;
_isCats = isCats;
}
@Override public void map(Chunk[] cs) {
double[] values = new double[cs.length];
ClusterDist cd = new ClusterDist();
for( int row = 0; row < cs[0]._len; row++ ) {
data(values, cs, row, _means, _mults, _modes);
_sqr += minSqr(_centers, values, _isCats, cd);
}
_means = _mults = null;
_modes = null;
_centers = null;
}
@Override public void reduce(SumSqr other) { _sqr += other._sqr; }
}
// -------------------------------------------------------------------------
// Sample rows with increasing probability the farther they are from any
// cluster center.
private static class Sampler extends MRTask<Sampler> {
// IN
double[][] _centers;
double[] _means, _mults; // Standardization
int[] _modes; // Imputation of missing categoricals
final String[][] _isCats;
final double _sqr; // Min-square-error
final double _probability; // Odds to select this point
final long _seed;
boolean _hasWeight;
// OUT
double[][] _sampled; // New cluster centers
Sampler( double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, double sqr, double prob, long seed, boolean hasWeight ) {
_centers = centers;
_means = means;
_mults = mults;
_modes = modes;
_isCats = isCats;
_sqr = sqr;
_probability = prob;
_seed = seed;
_hasWeight = hasWeight;
}
@Override public void map(Chunk[] cs) {
int N = cs.length - (_hasWeight?1:0);
double[] values = new double[N];
ArrayList<double[]> list = new ArrayList<>();
Random rand = RandomUtils.getRNG(0);
ClusterDist cd = new ClusterDist();
for( int row = 0; row < cs[0]._len; row++ ) {
rand.setSeed(_seed + cs[0].start()+row);
data(values, cs, row, _means, _mults, _modes);
double sqr = minSqr(_centers, values, _isCats, cd);
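// KMeans|| oversampling step: keep this row as a candidate center with
// probability proportional to its squared distance from the current centers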
if( _probability * sqr > rand.nextDouble() * _sqr )
list.add(values.clone());
}
_sampled = new double[list.size()][];
list.toArray(_sampled);
_centers = null;
_means = _mults = null;
_modes = null;
}
@Override public void reduce(Sampler other) {
_sampled = ArrayUtils.append(_sampled, other._sampled);
}
}
// ---------------------------------------
// A Lloyd's pass:
// Find nearest cluster center for every point
// Compute new mean/center & variance & rows for each cluster
// Compute distance between clusters
// Compute total sqr distance
private static class Lloyds extends MRTask<Lloyds> {
// IN
double[][] _centers;
double[] _means, _mults; // Standardization
int[] _modes; // Imputation of missing categoricals
final int _k;
final String[][] _isCats;
boolean _hasWeight;
// OUT
double[][] _cMeans; // Means for each cluster
long[/*k*/][/*features*/][/*nfactors*/] _cats; // Histogram of cat levels
double[] _cSqr; // Sum of squares for each cluster
long[] _size; // Number of rows in each cluster
long _worst_row; // Row with max err
double _worst_err; // Max-err-row's max-err
Lloyds( double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, int k, boolean hasWeight ) {
_centers = centers;
_means = means;
_mults = mults;
_modes = modes;
_isCats = isCats;
_k = k;
_hasWeight = hasWeight;
}
@Override public void map(Chunk[] cs) {
int N = cs.length - (_hasWeight ? 1:0);
assert _centers[0].length==N;
_cMeans = new double[_k][N];
_cSqr = new double[_k];
_size = new long[_k];
// Space for cat histograms
_cats = new long[_k][N][];
for( int clu=0; clu< _k; clu++ )
for( int col=0; col<N; col++ )
_cats[clu][col] = _isCats[col]==null ? null : new long[cs[col].vec().cardinality()];
_worst_err = 0;
// Find the closest cluster center for each row
double[] values = new double[N]; // Temp buffer to hold a row as doubles
ClusterDist cd = new ClusterDist();
for( int row = 0; row < cs[0]._len; row++ ) {
double weight = _hasWeight ? cs[N].atd(row) : 1;
if( weight == 0 ) continue; // Skip holdout rows
assert weight == 1; // K-Means only supports weights of 0 or 1
data(values, cs, row, _means, _mults, _modes); // Load the row as doubles
closest(_centers, values, _isCats, cd); // Find the closest cluster center
int clu = cd._cluster;
assert clu != -1; // No broken rows
_cSqr[clu] += cd._dist;
// Add values and increment the counter for the chosen cluster
for( int col = 0; col < N; col++ )
if( _isCats[col] != null )
_cats[clu][col][(int)values[col]]++; // Histogram the categorical levels
else
_cMeans[clu][col] += values[col]; // Sum the column values
_size[clu]++;
// Track the worst row (largest distance to its assigned center)
if( cd._dist > _worst_err) { _worst_err = cd._dist; _worst_row = cs[0].start()+row; }
}
// Scale back down to local mean
for( int clu = 0; clu < _k; clu++ )
if( _size[clu] != 0 ) ArrayUtils.div(_cMeans[clu], _size[clu]);
_centers = null;
_means = _mults = null;
_modes = null;
}
@Override public void reduce(Lloyds mr) {
for( int clu = 0; clu < _k; clu++ ) {
long ra = _size[clu];
long rb = mr._size[clu];
double[] ma = _cMeans[clu];
double[] mb = mr._cMeans[clu];
for( int c = 0; c < ma.length; c++ ) // Recursive mean
if( ra+rb > 0 ) ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb);
}
ArrayUtils.add(_cats, mr._cats);
ArrayUtils.add(_cSqr, mr._cSqr);
ArrayUtils.add(_size, mr._size);
// track global worst-row
if( _worst_err < mr._worst_err) { _worst_err = mr._worst_err; _worst_row = mr._worst_row; }
}
}
// A pair result: nearest cluster center and the square distance
private static final class ClusterDist { int _cluster; double _dist; }
private static double minSqr(double[][] centers, double[] point, String[][] isCats, ClusterDist cd) {
return closest(centers, point, isCats, cd, centers.length)._dist;
}
private static double minSqr(double[][] centers, double[] point, String[][] isCats, ClusterDist cd, int count) {
return closest(centers,point,isCats,cd,count)._dist;
}
private static ClusterDist closest(double[][] centers, double[] point, String[][] isCats, ClusterDist cd) {
return closest(centers, point, isCats, cd, centers.length);
}
/** Return the nearest of N cluster centers and the squared distance to it. */
private static ClusterDist closest(double[][] centers, double[] point, String[][] isCats, ClusterDist cd, int count) {
int min = -1;
double minSqr = Double.MAX_VALUE;
for( int cluster = 0; cluster < count; cluster++ ) {
double sqr = hex.genmodel.GenModel.KMeans_distance(centers[cluster],point,isCats,null,null);
if( sqr < minSqr ) { // Record nearest cluster
min = cluster;
minSqr = sqr;
}
}
cd._cluster = min; // Record nearest cluster
cd._dist = minSqr; // Record square-distance
return cd; // Return for flow-coding
}
// KMeans++ re-clustering
private static double[][] recluster(double[][] points, Random rand, int N, Initialization init, String[][] isCats) {
double[][] res = new double[N][];
res[0] = points[0];
int count = 1;
ClusterDist cd = new ClusterDist();
switch( init ) {
case Random:
break;
case PlusPlus: { // k-means++
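// D^2 weighting: pick each next center with probability proportional to
// its squared distance from the nearest already-chosen center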
while( count < res.length ) {
double sum = 0;
for (double[] point1 : points) sum += minSqr(res, point1, isCats, cd, count);
for (double[] point : points) {
if (minSqr(res, point, isCats, cd, count) >= rand.nextDouble() * sum) {
res[count++] = point;
break;
}
}
}
break;
}
case Furthest: { // Take the point farthest from any already-chosen center
while( count < res.length ) {
double max = 0;
int index = 0;
for( int i = 0; i < points.length; i++ ) {
double sqr = minSqr(res, points[i], isCats, cd, count);
if( sqr > max ) {
max = sqr;
index = i;
}
}
res[count++] = points[index];
}
break;
}
default: throw H2O.fail();
}
return res;
}
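// Pick a uniformly random row and load it (standardized) as a cluster center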
private void randomRow(Vec[] vecs, Random rand, double[] center, double[] means, double[] mults, int[] modes) {
long row = Math.max(0, (long) (rand.nextDouble() * vecs[0].length()) - 1);
data(center, vecs, row, means, mults, modes);
}
// Pick the most common categorical level for each cluster center's categorical columns
private static double[][] max_cats(double[][] centers, long[][][] cats, String[][] isCats) {
for( int clu = 0; clu < centers.length; clu++ )
for( int col = 0; col < centers[0].length; col++ )
if( isCats[col] != null )
centers[clu][col] = ArrayUtils.maxIndex(cats[clu][col]);
return centers;
}
private static double[][] destandardize(double[][] centers, String[][] isCats, double[] means, double[] mults) {
int K = centers.length;
int N = centers[0].length;
double[][] value = new double[K][N];
for( int clu = 0; clu < K; clu++ ) {
System.arraycopy(centers[clu],0,value[clu],0,N);
if( mults!=null ) { // Reverse standardization
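// Inverse of the data() transform: x = z / mult + mean restores raw units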
for( int col = 0; col < N; col++)
if( isCats[col] == null )
value[clu][col] = value[clu][col] / mults[col] + means[col];
}
}
return value;
}
private static void data(double[] values, Vec[] vecs, long row, double[] means, double[] mults, int[] modes) {
for( int i = 0; i < values.length; i++ ) {
double d = vecs[i].at(row);
values[i] = data(d, i, means, mults, modes);
}
}
private static void data(double[] values, Chunk[] chks, int row, double[] means, double[] mults, int[] modes) {
for( int i = 0; i < values.length; i++ ) {
double d = chks[i].atd(row);
values[i] = data(d, i, means, mults, modes);
}
}
/**
* Impute the column mean (numeric) or mode (categorical) if the value is NaN;
* then standardize numeric values if requested.
*/
private static double data(double d, int i, double[] means, double[] mults, int[] modes) {
if(modes[i] == -1) { // Mode = -1 for non-categorical cols
if( Double.isNaN(d) )
d = means[i];
if( mults != null ) {
d -= means[i];
d *= mults[i];
}
} else {
if( Double.isNaN(d) )
d = modes[i];
}
return d;
}
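// Example of data() above: a raw value 9 in a column with mean 5 and mult 0.5
// (i.e. sd 2) standardizes to (9 - 5) * 0.5 = 2; a NaN is first imputed to 5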
/**
* This helper creates a ModelMetricsClustering from a trained model
* @param model must contain valid statistics from training, such as _betweenss etc.
*/
private ModelMetricsClustering makeTrainingMetrics(KMeansModel model) {
ModelMetricsClustering mm = new ModelMetricsClustering(model, model._parms.train());
mm._size = model._output._size;
mm._withinss = model._output._withinss;
mm._betweenss = model._output._betweenss;
mm._totss = model._output._totss;
mm._tot_withinss = model._output._tot_withinss;
model.addMetrics(mm);
return mm;
}
}