/*===============================================================================
 * Copyright (c) 2010-2016 University of Massachusetts.  All Rights Reserved.
 *
 * Use of the RankLib package is subject to the terms of the software license set
 * forth in the LICENSE file included with this software, and also available at
 * http://people.cs.umass.edu/~vdang/ranklib_license.html
 *===============================================================================
 */

package ciir.umass.edu.eval;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import ciir.umass.edu.features.FeatureManager;
import ciir.umass.edu.features.LinearNormalizer;
import ciir.umass.edu.features.Normalizer;
import ciir.umass.edu.features.SumNormalizor;
import ciir.umass.edu.features.ZScoreNormalizor;
import ciir.umass.edu.learning.CoorAscent;
import ciir.umass.edu.learning.DataPoint;
import ciir.umass.edu.learning.LinearRegRank;
import ciir.umass.edu.learning.RankList;
import ciir.umass.edu.learning.Ranker;
import ciir.umass.edu.learning.RankerFactory;
import ciir.umass.edu.learning.RankerTrainer;
import ciir.umass.edu.learning.RankerType;
import ciir.umass.edu.learning.boosting.AdaRank;
import ciir.umass.edu.learning.boosting.RankBoost;
import ciir.umass.edu.learning.neuralnet.ListNet;
import ciir.umass.edu.learning.neuralnet.Neuron;
import ciir.umass.edu.learning.neuralnet.RankNet;
import ciir.umass.edu.learning.tree.LambdaMART;
import ciir.umass.edu.learning.tree.RFRanker;
import ciir.umass.edu.metric.ERRScorer;
import ciir.umass.edu.metric.METRIC;
import ciir.umass.edu.metric.MetricScorer;
import ciir.umass.edu.metric.MetricScorerFactory;
import ciir.umass.edu.utilities.FileUtils;
import ciir.umass.edu.utilities.MergeSorter;
import ciir.umass.edu.utilities.MyThreadPool;
import ciir.umass.edu.utilities.RankLibError;
import ciir.umass.edu.utilities.SimpleMath;

/**
 * @author vdang
 *
 * This class provides the interface for running and comparing different ranking algorithms.
 * It lets users specify general parameters (e.g. which algorithm to run, the training/test/validation
 * data, etc.) as well as algorithm-specific parameters. Type "java -jar bin/RankLib.jar" at the
 * command-line to see all the options.
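 *
 * For example, a typical train-then-evaluate session might look like this (the data and
 * model file names below are purely illustrative):
 * <pre>
 * java -jar bin/RankLib.jar -train train.txt -ranker 6 -metric2t NDCG@10 -save model.txt
 * java -jar bin/RankLib.jar -load model.txt -test test.txt -metric2T ERR@10
 * </pre>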
 */
public class Evaluator {
    private static final Logger logger = Logger.getLogger(Evaluator.class.getName());

    /**
     * @param args
     */
    public static void main(final String[] args) {

        final String[] rType = new String[] { "MART", "RankNet", "RankBoost", "AdaRank", "Coordinate Ascent", "LambdaRank", "LambdaMART",
                "ListNet", "Random Forests", "Linear Regression" };
        final RankerType[] rType2 = new RankerType[] { RankerType.MART, RankerType.RANKNET, RankerType.RANKBOOST, RankerType.ADARANK,
                RankerType.COOR_ASCENT, RankerType.LAMBDARANK, RankerType.LAMBDAMART, RankerType.LISTNET, RankerType.RANDOM_FOREST,
                RankerType.LINEAR_REGRESSION };
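        // rType and rType2 are parallel arrays: the integer passed to -ranker indexes both
        // the display name (rType) and the corresponding RankerType constant (rType2).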

        String trainFile = "";
        String featureDescriptionFile = "";
        float ttSplit = 0;//train-test split
        float tvSplit = 0;//train-validation split
        int foldCV = -1;
        String validationFile = "";
        String testFile = "";
        final List<String> testFiles = new ArrayList<>();
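        // Default ranker: index 4 in the lists above, i.e. Coordinate Ascent.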
        int rankerType = 4;
        String trainMetric = "ERR@10";
        String testMetric = "";
        Evaluator.normalize = false;
        String savedModelFile = "";
        final List<String> savedModelFiles = new ArrayList<>();
        String kcvModelDir = "";
        String kcvModelFile = "";
        String rankFile = "";
        String prpFile = "";

        int nThread = -1; // nThread = #cpu-cores
        //for my personal use
        String indriRankingFile = "";
        String scoreFile = "";

        if (args.length < 2) {
            logger.info(() -> "Usage: java -jar RankLib.jar ");
            logger.info(() -> "Params:");
            logger.info(() -> "  [+] Training (+ tuning and evaluation)");
            logger.info(() -> "\t-train \t\tTraining data");
            logger.info(() -> "\t-ranker \t\tSpecify which ranking algorithm to use");
            logger.info(() -> "\t\t\t\t0: MART (gradient boosted regression tree)");
            logger.info(() -> "\t\t\t\t1: RankNet");
            logger.info(() -> "\t\t\t\t2: RankBoost");
            logger.info(() -> "\t\t\t\t3: AdaRank");
            logger.info(() -> "\t\t\t\t4: Coordinate Ascent");
            logger.info(() -> "\t\t\t\t6: LambdaMART");
            logger.info(() -> "\t\t\t\t7: ListNet");
            logger.info(() -> "\t\t\t\t8: Random Forests");
            logger.info(() -> "\t\t\t\t9: Linear regression (L2 regularization)");
            logger.info(
                    () -> "\t[ -feature <file> ]\tFeature description file: list features to be considered by the learner, each on a separate line");
            logger.info(() -> "\t\t\t\tIf not specified, all features will be used.");
            logger.info(() -> "\t[ -metric2t <metric> ]\tMetric to optimize on the training data.  "
                    + "Supported: MAP, NDCG@k, DCG@k, P@k, RR@k, ERR@k (default=ERR@10)");
            logger.info(() -> "\t[ -gmax <label> ]\tHighest judged relevance label. It affects the calculation of ERR (default="
                    + (int) SimpleMath.logBase2(ERRScorer.MAX) + ", i.e. 5-point scale {0,1,2,3,4})");
            logger.info(() -> "\t[ -validate <file> ]\tSpecify if you want to tune your system on the validation data (default=unspecified)");
            logger.info(() -> "\t\t\t\tIf specified, the final model will be the one that performs best on the validation data");
            logger.info(() -> "\t[ -tvs <x in [0..1]> ]\tIf you don't have separate validation data, "
                    + "use this to set the train-validation split");
            logger.info(() -> "\t[ -save <model> ]\tSave the model learned (default=not-save)");
            logger.info(() -> "\t[ -test <file> ]\tSpecify if you want to evaluate the trained model on this data (default=unspecified)");
            logger.info(() -> "\t[ -tts <x in [0..1]> ]\tSet the train-test split. -tts will override -tvs");
            logger.info(() -> "\t[ -metric2T <metric> ]\tMetric to evaluate on the test data (default=same as specified for -metric2t)");
            logger.info(() -> "\t[ -norm <method> ]\tNormalize all feature vectors (default=no-normalization). Method can be:");
            logger.info(() -> "\t\t\t\tsum: normalize each feature by the sum of all its values");
            logger.info(() -> "\t\t\t\tzscore: normalize each feature by its mean/standard deviation");
            logger.info(() -> "\t\t\t\tlinear: normalize each feature by its min/max values");
            logger.info(() -> "\t[ -kcv <k> ]\t\tSpecify if you want to perform k-fold cross validation using the specified training data "
                    + "(default=NoCV)");
            logger.info(() -> "\t\t\t\t-tvs can be used to further reserve a portion of the training data in each fold for validation");
            logger.info(() -> "\t[ -kcvmd <dir> ]\tDirectory for models trained via cross-validation (default=not-save)");
            logger.info(
                    () -> "\t[ -kcvmn <model> ]\tName for model learned in each fold. It will be prefix-ed with the fold-number (default=empty)");

            logger.info(() -> "    [-] RankNet-specific parameters");
            logger.info(() -> "\t[ -epoch  ]\t\tThe number of epochs to train (default=" + RankNet.nIteration + ")");
            logger.info(() -> "\t[ -layer  ]\tThe number of hidden layers (default=" + RankNet.nHiddenLayer + ")");
            logger.info(() -> "\t[ -node  ]\tThe number of hidden nodes per layer (default=" + RankNet.nHiddenNodePerLayer + ")");
            logger.info(() -> "\t[ -lr  ]\t\tLearning rate (default="
                    + (new DecimalFormat("###.########")).format(RankNet.learningRate) + ")");

            logger.info(() -> "    [-] RankBoost-specific parameters");
            logger.info(() -> "\t[ -round  ]\t\tThe number of rounds to train (default=" + RankBoost.nIteration + ")");
            logger.info(() -> "\t[ -tc  ]\t\tNumber of threshold candidates to search. -1 to use all feature values (default="
                    + RankBoost.nThreshold + ")");

            logger.info(() -> "    [-] AdaRank-specific parameters");
            logger.info(() -> "\t[ -round  ]\t\tThe number of rounds to train (default=" + AdaRank.nIteration + ")");
            logger.info(() -> "\t[ -noeq ]\t\tTrain without enqueuing too-strong features (default=unspecified)");
            logger.info(
                    () -> "\t[ -tolerance  ]\tTolerance between two consecutive rounds of learning (default=" + AdaRank.tolerance + ")");
            logger.info(() -> "\t[ -max  ]\tThe maximum number of times a feature can be consecutively selected "
                    + "without changing performance (default=" + AdaRank.maxSelCount + ")");

            logger.info(() -> "    [-] Coordinate Ascent-specific parameters");
            logger.info(() -> "\t[ -r  ]\t\tThe number of random restarts (default=" + CoorAscent.nRestart + ")");
            logger.info(() -> "\t[ -i  ]\tThe number of iterations to search in each dimension (default="
                    + CoorAscent.nMaxIteration + ")");
            logger.info(() -> "\t[ -tolerance  ]\tPerformance tolerance between two solutions (default=" + CoorAscent.tolerance + ")");
            logger.info(() -> "\t[ -reg  ]\tRegularization parameter (default=no-regularization)");

            logger.info(() -> "    [-] {MART, LambdaMART}-specific parameters");
            logger.info(() -> "\t[ -tree  ]\t\tNumber of trees (default=" + LambdaMART.nTrees + ")");
            logger.info(() -> "\t[ -leaf  ]\t\tNumber of leaves for each tree (default=" + LambdaMART.nTreeLeaves + ")");
            logger.info(() -> "\t[ -shrinkage  ]\tShrinkage, or learning rate (default=" + LambdaMART.learningRate + ")");
            logger.info(() -> "\t[ -tc  ]\t\tNumber of threshold candidates for tree spliting. -1 to use all feature values (default="
                    + LambdaMART.nThreshold + ")");
            logger.info(() -> "\t[ -mls  ]\t\tMin leaf support -- minimum % of docs each leaf has to contain (default="
                    + LambdaMART.minLeafSupport + ")");
            logger.info(
                    () -> "\t[ -estop  ]\t\tStop early when no improvement is observed on validaton data in e consecutive rounds (default="
                            + LambdaMART.nRoundToStopEarly + ")");

            logger.info(() -> "    [-] ListNet-specific parameters");
            logger.info(() -> "\t[ -epoch  ]\t\tThe number of epochs to train (default=" + ListNet.nIteration + ")");
            logger.info(() -> "\t[ -lr  ]\t\tLearning rate (default="
                    + (new DecimalFormat("###.########")).format(ListNet.learningRate) + ")");

            logger.info(() -> "    [-] Random Forests-specific parameters");
            logger.info(() -> "\t[ -bag  ]\t\tNumber of bags (default=" + RFRanker.nBag + ")");
            logger.info(() -> "\t[ -srate  ]\t\tSub-sampling rate (default=" + RFRanker.subSamplingRate + ")");
            logger.info(() -> "\t[ -frate  ]\t\tFeature sampling rate (default=" + RFRanker.featureSamplingRate + ")");
            final int type = (RFRanker.rType.ordinal() - RankerType.MART.ordinal());
            logger.info(() -> "\t[ -rtype  ]\tRanker to bag (default=" + type + ", i.e. " + rType[type] + ")");
            logger.info(() -> "\t[ -tree  ]\t\tNumber of trees in each bag (default=" + RFRanker.nTrees + ")");
            logger.info(() -> "\t[ -leaf  ]\t\tNumber of leaves for each tree (default=" + RFRanker.nTreeLeaves + ")");
            logger.info(() -> "\t[ -shrinkage  ]\tShrinkage, or learning rate (default=" + RFRanker.learningRate + ")");
            logger.info(() -> "\t[ -tc  ]\t\tNumber of threshold candidates for tree spliting. -1 to use all feature values (default="
                    + RFRanker.nThreshold + ")");
            logger.info(() -> "\t[ -mls  ]\t\tMin leaf support -- minimum % of docs each leaf has to contain (default="
                    + RFRanker.minLeafSupport + ")");

            logger.info(() -> "    [-] Linear Regression-specific parameters");
            logger.info(() -> "\t[ -L2  ]\t\tL2 regularization parameter (default=" + LinearRegRank.lambda + ")");

            logger.info(() -> "  [+] Testing previously saved models");
            logger.info(() -> "\t-load \t\tThe model to load");
            logger.info(() -> "\t\t\t\tMultiple -load can be used to specify models from multiple folds (in increasing order),");
            logger.info(() -> "\t\t\t\t  in which case the test/rank data will be partitioned accordingly.");
            logger.info(() -> "\t-test \t\tTest data to evaluate the model(s) (specify either this or -rank but not both)");
            logger.info(() -> "\t-rank \t\tRank the samples in the specified file (specify either this or -test but not both)");
            logger.info(() -> "\t[ -metric2T  ]\tMetric to evaluate on the test data (default=ERR@10)");
            logger.info(() -> "\t[ -gmax 



