All Downloads are FREE. Search and download functionalities are using the official Maven repository.

water.bindings.proxies.retrofit.Grid Maven / Gradle / Ivy

There is a newer version: 3.46.0.5
Show newest version
/*
 * This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
 * Copyright 2016 H2O.ai;  Apache License Version 2.0 (see LICENSE for details)
 */
package water.bindings.proxies.retrofit;

import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;

public interface Grid {

  /** 
   * Run grid search for XGBoost model.
   *   @param ntrees (same as n_estimators) Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
   *   @param min_child_weight (same as min_rows) Fewest allowed (weighted) observations in a leaf.
   *   @param learn_rate (same as eta) Learning rate (from 0.0 to 1.0)
   *   @param eta (same as learn_rate) Learning rate (from 0.0 to 1.0)
   *   @param sample_rate (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
   *   @param subsample (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
   *   @param colsample_bylevel (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
   *   @param col_sample_rate_per_tree (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
   *   @param colsample_bytree (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
   *   @param colsample_bynode Column sample rate per tree node (from 0.0 to 1.0)
   *   @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
   *                               constraint and -1 to specify a decreasing constraint.
   *   @param max_abs_leafnode_pred (same as max_delta_step) Maximum absolute value of a leaf node prediction
   *   @param max_delta_step (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param min_split_improvement (same as gamma) Minimum relative improvement in squared error reduction for a split
   *                                to happen
   *   @param gamma (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split
   *                to happen
   *   @param nthread Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits
   *                  (-nthreads parameter). Defaults to maximum available
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param save_matrix_directory Directory where to save matrices passed to XGBoost library. Useful for debugging.
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param max_bins For tree_method=hist only: maximum number of bins
   *   @param max_leaves For tree_method=hist only: maximum number of leaves
   *   @param tree_method Tree method
   *   @param grow_policy Grow policy - depthwise is standard GBM, lossguide is LightGBM
   *   @param booster Booster type
   *   @param reg_lambda L2 regularization
   *   @param reg_alpha L1 regularization
   *   @param quiet_mode Enable quiet mode
   *   @param sample_type For booster=dart only: sample_type
   *   @param normalize_type For booster=dart only: normalize_type
   *   @param rate_drop For booster=dart only: rate_drop (0..1)
   *   @param one_drop For booster=dart only: one_drop
   *   @param skip_drop For booster=dart only: skip_drop (0..1)
   *   @param dmatrix_type Type of DMatrix. For sparse, NAs and 0 are treated equally.
   *   @param backend Backend. By default (auto), a GPU is used if available.
   *   @param gpu_id Which GPU(s) to use.
   *   @param interaction_constraints A set of allowed column interactions.
   *   @param scale_pos_weight Controls the effect of observations with positive labels in relation to the observations
   *                           with negative labels on gradient calculation. Useful for imbalanced problems.
   *   @param eval_metric Specification of evaluation metric that will be passed to the native XGBoost backend.
   *   @param score_eval_metric_only If enabled, score only the evaluation metric. This can make model training faster
   *                                 if scoring is frequent (eg. each iteration).
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/xgboost")
  Call trainXgboost(
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("min_child_weight") double min_child_weight,
    @Field("learn_rate") double learn_rate,
    @Field("eta") double eta,
    @Field("sample_rate") double sample_rate,
    @Field("subsample") double subsample,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("colsample_bylevel") double colsample_bylevel,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("colsample_bytree") double colsample_bytree,
    @Field("colsample_bynode") double colsample_bynode,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") float max_abs_leafnode_pred,
    @Field("max_delta_step") float max_delta_step,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("seed") long seed,
    @Field("min_split_improvement") float min_split_improvement,
    @Field("gamma") float gamma,
    @Field("nthread") int nthread,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("save_matrix_directory") String save_matrix_directory,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("max_bins") int max_bins,
    @Field("max_leaves") int max_leaves,
    @Field("tree_method") TreexgboostXGBoostModelXGBoostParametersTreeMethod tree_method,
    @Field("grow_policy") TreexgboostXGBoostModelXGBoostParametersGrowPolicy grow_policy,
    @Field("booster") TreexgboostXGBoostModelXGBoostParametersBooster booster,
    @Field("reg_lambda") float reg_lambda,
    @Field("reg_alpha") float reg_alpha,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("sample_type") TreexgboostXGBoostModelXGBoostParametersDartSampleType sample_type,
    @Field("normalize_type") TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalize_type,
    @Field("rate_drop") float rate_drop,
    @Field("one_drop") boolean one_drop,
    @Field("skip_drop") float skip_drop,
    @Field("dmatrix_type") TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrix_type,
    @Field("backend") TreexgboostXGBoostModelXGBoostParametersBackend backend,
    @Field("gpu_id") int[] gpu_id,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("scale_pos_weight") float scale_pos_weight,
    @Field("eval_metric") String eval_metric,
    @Field("score_eval_metric_only") boolean score_eval_metric_only,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/xgboost")
  Call trainXgboost();

  /** 
   * Resume grid search for XGBoost model.
   *   @param ntrees (same as n_estimators) Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
   *   @param min_child_weight (same as min_rows) Fewest allowed (weighted) observations in a leaf.
   *   @param learn_rate (same as eta) Learning rate (from 0.0 to 1.0)
   *   @param eta (same as learn_rate) Learning rate (from 0.0 to 1.0)
   *   @param sample_rate (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
   *   @param subsample (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
   *   @param colsample_bylevel (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
   *   @param col_sample_rate_per_tree (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
   *   @param colsample_bytree (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
   *   @param colsample_bynode Column sample rate per tree node (from 0.0 to 1.0)
   *   @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
   *                               constraint and -1 to specify a decreasing constraint.
   *   @param max_abs_leafnode_pred (same as max_delta_step) Maximum absolute value of a leaf node prediction
   *   @param max_delta_step (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param min_split_improvement (same as gamma) Minimum relative improvement in squared error reduction for a split
   *                                to happen
   *   @param gamma (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split
   *                to happen
   *   @param nthread Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits
   *                  (-nthreads parameter). Defaults to maximum available
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param save_matrix_directory Directory where to save matrices passed to XGBoost library. Useful for debugging.
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param max_bins For tree_method=hist only: maximum number of bins
   *   @param max_leaves For tree_method=hist only: maximum number of leaves
   *   @param tree_method Tree method
   *   @param grow_policy Grow policy - depthwise is standard GBM, lossguide is LightGBM
   *   @param booster Booster type
   *   @param reg_lambda L2 regularization
   *   @param reg_alpha L1 regularization
   *   @param quiet_mode Enable quiet mode
   *   @param sample_type For booster=dart only: sample_type
   *   @param normalize_type For booster=dart only: normalize_type
   *   @param rate_drop For booster=dart only: rate_drop (0..1)
   *   @param one_drop For booster=dart only: one_drop
   *   @param skip_drop For booster=dart only: skip_drop (0..1)
   *   @param dmatrix_type Type of DMatrix. For sparse, NAs and 0 are treated equally.
   *   @param backend Backend. By default (auto), a GPU is used if available.
   *   @param gpu_id Which GPU(s) to use.
   *   @param interaction_constraints A set of allowed column interactions.
   *   @param scale_pos_weight Controls the effect of observations with positive labels in relation to the observations
   *                           with negative labels on gradient calculation. Useful for imbalanced problems.
   *   @param eval_metric Specification of evaluation metric that will be passed to the native XGBoost backend.
   *   @param score_eval_metric_only If enabled, score only the evaluation metric. This can make model training faster
   *                                 if scoring is frequent (eg. each iteration).
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/xgboost/resume")
  Call resumeXgboost(
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("min_child_weight") double min_child_weight,
    @Field("learn_rate") double learn_rate,
    @Field("eta") double eta,
    @Field("sample_rate") double sample_rate,
    @Field("subsample") double subsample,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("colsample_bylevel") double colsample_bylevel,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("colsample_bytree") double colsample_bytree,
    @Field("colsample_bynode") double colsample_bynode,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") float max_abs_leafnode_pred,
    @Field("max_delta_step") float max_delta_step,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("seed") long seed,
    @Field("min_split_improvement") float min_split_improvement,
    @Field("gamma") float gamma,
    @Field("nthread") int nthread,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("save_matrix_directory") String save_matrix_directory,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("max_bins") int max_bins,
    @Field("max_leaves") int max_leaves,
    @Field("tree_method") TreexgboostXGBoostModelXGBoostParametersTreeMethod tree_method,
    @Field("grow_policy") TreexgboostXGBoostModelXGBoostParametersGrowPolicy grow_policy,
    @Field("booster") TreexgboostXGBoostModelXGBoostParametersBooster booster,
    @Field("reg_lambda") float reg_lambda,
    @Field("reg_alpha") float reg_alpha,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("sample_type") TreexgboostXGBoostModelXGBoostParametersDartSampleType sample_type,
    @Field("normalize_type") TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalize_type,
    @Field("rate_drop") float rate_drop,
    @Field("one_drop") boolean one_drop,
    @Field("skip_drop") float skip_drop,
    @Field("dmatrix_type") TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrix_type,
    @Field("backend") TreexgboostXGBoostModelXGBoostParametersBackend backend,
    @Field("gpu_id") int[] gpu_id,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("scale_pos_weight") float scale_pos_weight,
    @Field("eval_metric") String eval_metric,
    @Field("score_eval_metric_only") boolean score_eval_metric_only,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/xgboost/resume")
  Call resumeXgboost();

  /** 
   * Run grid search for Infogram model.
   *   @param seed Seed for pseudo random number generator (if applicable).
   *   @param standardize Standardize numeric columns to have zero mean and unit variance.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
   *   @param max_iterations Maximum number of iterations.
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param algorithm Type of machine learning algorithm used to build the infogram. Options include 'AUTO' (gbm),
   *                    'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with default
   *                    parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default parameters), or
   *                    'xgboost' (if available, XGBoost with default parameters).
   *   @param algorithm_params Customized parameters for the machine learning algorithm specified in the algorithm
   *                           parameter.
   *   @param protected_columns Columns that contain features that are sensitive and need to be protected (legally, or
   *                            otherwise), if applicable. These features (e.g. race, gender, etc) should not drive the
   *                            prediction of the response.
   *   @param total_information_threshold A number between 0 and 1 representing a threshold for total information,
   *                                      defaulting to 0.1. For a specific feature, if the total information is higher
   *                                      than this threshold, and the corresponding net information is also higher than
   *                                      the threshold ``net_information_threshold``, that feature will be considered
   *                                      admissible. The total information is the x-axis of the Core Infogram. Default
   *                                      is -1 which gets set to 0.1.
   *   @param net_information_threshold A number between 0 and 1 representing a threshold for net information,
   *                                    defaulting to 0.1.  For a specific feature, if the net information is higher
   *                                    than this threshold, and the corresponding total information is also higher than
   *                                    the total_information_threshold, that feature will be considered admissible. The
   *                                    net information is the y-axis of the Core Infogram. Default is -1 which gets set
   *                                    to 0.1.
   *   @param relevance_index_threshold A number between 0 and 1 representing a threshold for the relevance index,
   *                                    defaulting to 0.1.  This is only used when ``protected_columns`` is set by the
   *                                    user.  For a specific feature, if the relevance index value is higher than this
   *                                    threshold, and the corresponding safety index is also higher than the
   *                                    safety_index_threshold``, that feature will be considered admissible.  The
   *                                    relevance index is the x-axis of the Fair Infogram. Default is -1 which gets set
   *                                    to 0.1.
   *   @param safety_index_threshold A number between 0 and 1 representing a threshold for the safety index, defaulting
   *                                 to 0.1.  This is only used when protected_columns is set by the user.  For a
   *                                 specific feature, if the safety index value is higher than this threshold, and the
   *                                 corresponding relevance index is also higher than the relevance_index_threshold,
   *                                 that feature will be considered admissible.  The safety index is the y-axis of the
   *                                 Fair Infogram. Default is -1 which gets set to 0.1.
   *   @param data_fraction The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any
   *                        value greater than 0 and less than or equal to 1.0 is acceptable.
   *   @param top_n_features An integer specifying the number of columns to evaluate in the infogram.  The columns are
   *                         ranked by variable importance, and the top N are evaluated.  Defaults to 50.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/infogram")
  Call trainInfogram(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("plug_values") String plug_values,
    @Field("max_iterations") int max_iterations,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("algorithm") InfogramAlgorithm algorithm,
    @Field("algorithm_params") String algorithm_params,
    @Field("protected_columns") String[] protected_columns,
    @Field("total_information_threshold") double total_information_threshold,
    @Field("net_information_threshold") double net_information_threshold,
    @Field("relevance_index_threshold") double relevance_index_threshold,
    @Field("safety_index_threshold") double safety_index_threshold,
    @Field("data_fraction") double data_fraction,
    @Field("top_n_features") int top_n_features,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/infogram")
  Call trainInfogram();

  /** 
   * Resume grid search for Infogram model.
   *   @param seed Seed for pseudo random number generator (if applicable).
   *   @param standardize Standardize numeric columns to have zero mean and unit variance.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
   *   @param max_iterations Maximum number of iterations.
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param algorithm Type of machine learning algorithm used to build the infogram. Options include 'AUTO' (gbm),
   *                    'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with default
   *                    parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default parameters), or
   *                    'xgboost' (if available, XGBoost with default parameters).
   *   @param algorithm_params Customized parameters for the machine learning algorithm specified in the algorithm
   *                           parameter.
   *   @param protected_columns Columns that contain features that are sensitive and need to be protected (legally, or
   *                            otherwise), if applicable. These features (e.g. race, gender, etc) should not drive the
   *                            prediction of the response.
   *   @param total_information_threshold A number between 0 and 1 representing a threshold for total information,
   *                                      defaulting to 0.1. For a specific feature, if the total information is higher
   *                                      than this threshold, and the corresponding net information is also higher than
   *                                      the threshold ``net_information_threshold``, that feature will be considered
   *                                      admissible. The total information is the x-axis of the Core Infogram. Default
   *                                      is -1 which gets set to 0.1.
   *   @param net_information_threshold A number between 0 and 1 representing a threshold for net information,
   *                                    defaulting to 0.1.  For a specific feature, if the net information is higher
   *                                    than this threshold, and the corresponding total information is also higher than
   *                                    the total_information_threshold, that feature will be considered admissible. The
   *                                    net information is the y-axis of the Core Infogram. Default is -1 which gets set
   *                                    to 0.1.
   *   @param relevance_index_threshold A number between 0 and 1 representing a threshold for the relevance index,
   *                                    defaulting to 0.1.  This is only used when ``protected_columns`` is set by the
   *                                    user.  For a specific feature, if the relevance index value is higher than this
   *                                    threshold, and the corresponding safety index is also higher than the
   *                                    safety_index_threshold``, that feature will be considered admissible.  The
   *                                    relevance index is the x-axis of the Fair Infogram. Default is -1 which gets set
   *                                    to 0.1.
   *   @param safety_index_threshold A number between 0 and 1 representing a threshold for the safety index, defaulting
   *                                 to 0.1.  This is only used when protected_columns is set by the user.  For a
   *                                 specific feature, if the safety index value is higher than this threshold, and the
   *                                 corresponding relevance index is also higher than the relevance_index_threshold,
   *                                 that feature will be considered admissible.  The safety index is the y-axis of the
   *                                 Fair Infogram. Default is -1 which gets set to 0.1.
   *   @param data_fraction The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any
   *                        value greater than 0 and less than or equal to 1.0 is acceptable.
   *   @param top_n_features An integer specifying the number of columns to evaluate in the infogram.  The columns are
   *                         ranked by variable importance, and the top N are evaluated.  Defaults to 50.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/infogram/resume")
  Call resumeInfogram(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("plug_values") String plug_values,
    @Field("max_iterations") int max_iterations,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("algorithm") InfogramAlgorithm algorithm,
    @Field("algorithm_params") String algorithm_params,
    @Field("protected_columns") String[] protected_columns,
    @Field("total_information_threshold") double total_information_threshold,
    @Field("net_information_threshold") double net_information_threshold,
    @Field("relevance_index_threshold") double relevance_index_threshold,
    @Field("safety_index_threshold") double safety_index_threshold,
    @Field("data_fraction") double data_fraction,
    @Field("top_n_features") int top_n_features,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/infogram/resume")
  Call resumeInfogram();

  /** 
   * Run grid search for TargetEncoder model.
   *   @param columns_to_encode List of categorical columns or groups of categorical columns to encode. When groups of
   *                            columns are specified, each group is encoded as a single column (interactions are
   *                            created internally).
   *   @param keep_original_categorical_columns If true, the original non-encoded categorical features will remain in
   *                                            the result frame.
   *   @param blending If true, enables blending of posterior probabilities (computed for a given categorical value)
   *                   with prior probabilities (computed on the entire set). This allows to mitigate the effect of
   *                   categorical values with small cardinality. The blending effect can be tuned using the
   *                   `inflection_point` and `smoothing` parameters.
   *   @param inflection_point Inflection point of the sigmoid used to blend probabilities (see `blending` parameter).
   *                           For a given categorical value, if it appears less that `inflection_point` in a data
   *                           sample, then the influence of the posterior probability will be smaller than the prior.
   *   @param smoothing Smoothing factor corresponds to the inverse of the slope at the inflection point on the sigmoid
   *                    used to blend probabilities (see `blending` parameter). If smoothing tends towards 0, then the
   *                    sigmoid used for blending turns into a Heaviside step function.
   *   @param data_leakage_handling Data leakage handling strategy used to generate the encoding. Supported options are:
   *                                1) "none" (default) - no holdout, using the entire training frame.
   *                                2) "leave_one_out" - current row's response value is subtracted from the per-level
   *                                frequencies pre-calculated on the entire training frame.
   *                                3) "k_fold" - encodings for a fold are generated based on out-of-fold data.
   *   @param noise The amount of noise to add to the encoded column. Use 0 to disable noise, and -1 (=AUTO) to let the
   *                algorithm determine a reasonable amount of noise.
   *   @param seed Seed used to generate the noise. By default, the seed is chosen randomly.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/targetencoder")
  Call trainTargetencoder(
    @Field("columns_to_encode") String[][] columns_to_encode,
    @Field("keep_original_categorical_columns") boolean keep_original_categorical_columns,
    @Field("blending") boolean blending,
    @Field("inflection_point") double inflection_point,
    @Field("smoothing") double smoothing,
    @Field("data_leakage_handling") H2otargetencodingTargetEncoderModelDataLeakageHandlingStrategy data_leakage_handling,
    @Field("noise") double noise,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/targetencoder")
  Call trainTargetencoder();

  /** 
   * Resume grid search for TargetEncoder model.
   *   @param columns_to_encode List of categorical columns or groups of categorical columns to encode. When groups of
   *                            columns are specified, each group is encoded as a single column (interactions are
   *                            created internally).
   *   @param keep_original_categorical_columns If true, the original non-encoded categorical features will remain in
   *                                            the result frame.
   *   @param blending If true, enables blending of posterior probabilities (computed for a given categorical value)
   *                   with prior probabilities (computed on the entire set). This allows to mitigate the effect of
   *                   categorical values with small cardinality. The blending effect can be tuned using the
   *                   `inflection_point` and `smoothing` parameters.
   *   @param inflection_point Inflection point of the sigmoid used to blend probabilities (see `blending` parameter).
   *                           For a given categorical value, if it appears less that `inflection_point` in a data
   *                           sample, then the influence of the posterior probability will be smaller than the prior.
   *   @param smoothing Smoothing factor corresponds to the inverse of the slope at the inflection point on the sigmoid
   *                    used to blend probabilities (see `blending` parameter). If smoothing tends towards 0, then the
   *                    sigmoid used for blending turns into a Heaviside step function.
   *   @param data_leakage_handling Data leakage handling strategy used to generate the encoding. Supported options are:
   *                                1) "none" (default) - no holdout, using the entire training frame.
   *                                2) "leave_one_out" - current row's response value is subtracted from the per-level
   *                                frequencies pre-calculated on the entire training frame.
   *                                3) "k_fold" - encodings for a fold are generated based on out-of-fold data.
   *   @param noise The amount of noise to add to the encoded column. Use 0 to disable noise, and -1 (=AUTO) to let the
   *                algorithm determine a reasonable amount of noise.
   *   @param seed Seed used to generate the noise. By default, the seed is chosen randomly.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/targetencoder/resume")
  Call resumeTargetencoder(
    @Field("columns_to_encode") String[][] columns_to_encode,
    @Field("keep_original_categorical_columns") boolean keep_original_categorical_columns,
    @Field("blending") boolean blending,
    @Field("inflection_point") double inflection_point,
    @Field("smoothing") double smoothing,
    @Field("data_leakage_handling") H2otargetencodingTargetEncoderModelDataLeakageHandlingStrategy data_leakage_handling,
    @Field("noise") double noise,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/targetencoder/resume")
  Call resumeTargetencoder();

  /** 
   * Run grid search for DeepLearning model.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs.
   *   @param activation Activation function.
   *   @param hidden Hidden layer sizes (e.g. [100, 100]).
   *   @param epochs How many times the dataset should be iterated (streamed), can be fractional.
   *   @param train_samples_per_iteration Number of training samples (globally) per MapReduce iteration. Special values
   *                                      are 0: one epoch, -1: all available data (e.g., replicated training data), -2:
   *                                      automatic.
   *   @param target_ratio_comm_to_comp Target ratio of communication overhead to computation. Only for multi-node
   *                                    operation and train_samples_per_iteration = -2 (auto-tuning).
   *   @param seed Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
   *   @param adaptive_rate Adaptive learning rate.
   *   @param rho Adaptive learning rate time decay factor (similarity to prior updates).
   *   @param epsilon Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
   *   @param rate Learning rate (higher => less stable, lower => slower convergence).
   *   @param rate_annealing Learning rate annealing: rate / (1 + rate_annealing * samples).
   *   @param rate_decay Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1).
   *   @param momentum_start Initial momentum at the beginning of training (try 0.5).
   *   @param momentum_ramp Number of training samples for which momentum increases.
   *   @param momentum_stable Final momentum after the ramp is over (try 0.99).
   *   @param nesterov_accelerated_gradient Use Nesterov accelerated gradient (recommended).
   *   @param input_dropout_ratio Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
   *   @param hidden_dropout_ratios Hidden layer dropout ratios (can improve generalization), specify one value per
   *                                hidden layer, defaults to 0.5.
   *   @param l1 L1 regularization (can add stability and improve generalization, causes many weights to become 0).
   *   @param l2 L2 regularization (can add stability and improve generalization, causes many weights to be small.
   *   @param max_w2 Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
   *   @param initial_weight_distribution Initial weight distribution.
   *   @param initial_weight_scale Uniform: -value...value, Normal: stddev.
   *   @param initial_weights A list of H2OFrame ids to initialize the weight matrices of this model with.
   *   @param initial_biases A list of H2OFrame ids to initialize the bias vectors of this model with.
   *   @param loss Loss function.
   *   @param score_interval Shortest time interval (in seconds) between model scoring.
   *   @param score_training_samples Number of training set samples for scoring (0 for all).
   *   @param score_validation_samples Number of validation set samples for scoring (0 for all).
   *   @param score_duty_cycle Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
   *   @param classification_stop Stopping criterion for classification error fraction on training data (-1 to disable).
   *   @param regression_stop Stopping criterion for regression error (MSE) on training data (-1 to disable).
   *   @param quiet_mode Enable quiet mode for less output to standard output.
   *   @param score_validation_sampling Method used to sample validation dataset for scoring.
   *   @param overwrite_with_best_model If enabled, override the final model with the best model found during training.
   *   @param autoencoder Auto-Encoder.
   *   @param use_all_factor_levels Use all factor levels of categorical variables. Otherwise, the first factor level is
   *                                omitted (without loss of accuracy). Useful for variable importances and auto-enabled
   *                                for autoencoder.
   *   @param standardize If enabled, automatically standardize the data. If disabled, the user must provide properly
   *                      scaled input data.
   *   @param diagnostics Enable diagnostics for hidden layers.
   *   @param variable_importances Compute variable importances for input features (Gedeon method) - can be slow for
   *                               large networks.
   *   @param fast_mode Enable fast mode (minor approximation in back-propagation).
   *   @param force_load_balance Force extra load balancing to increase training speed for small datasets (to keep all
   *                             cores busy).
   *   @param replicate_training_data Replicate the entire training dataset onto every node for faster training on small
   *                                  datasets.
   *   @param single_node_mode Run on a single node for fine-tuning of model parameters.
   *   @param shuffle_training_data Enable shuffling of training data (recommended if training data is replicated and
   *                                train_samples_per_iteration is close to #nodes x #rows, of if using
   *                                balance_classes).
   *   @param missing_values_handling Handling of missing values. Either MeanImputation or Skip.
   *   @param sparse Sparse data handling (more efficient for data with lots of 0 values).
   *   @param col_major #DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation,
   *                    but might slow down backpropagation.
   *   @param average_activation Average activation for sparse auto-encoder. #Experimental
   *   @param sparsity_beta Sparsity regularization. #Experimental
   *   @param max_categorical_features Max. number of categorical features, enforced via hashing. #Experimental
   *   @param reproducible Force reproducibility on small data (will be slow - only uses 1 thread).
   *   @param export_weights_and_biases Whether to export Neural Network weights and biases to H2O Frames.
   *   @param mini_batch_size Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
   *   @param elastic_averaging Elastic averaging between compute nodes can improve distributed model convergence.
   *                            #Experimental
   *   @param elastic_averaging_moving_rate Elastic averaging moving rate (only if elastic averaging is enabled).
   *   @param elastic_averaging_regularization Elastic averaging regularization strength (only if elastic averaging is
   *                                           enabled).
   *   @param pretrained_autoencoder Pretrained autoencoder model to initialize this model with.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/deeplearning")
  Call trainDeeplearning(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("activation") DeepLearningActivation activation,
    @Field("hidden") int[] hidden,
    @Field("epochs") double epochs,
    @Field("train_samples_per_iteration") long train_samples_per_iteration,
    @Field("target_ratio_comm_to_comp") double target_ratio_comm_to_comp,
    @Field("seed") long seed,
    @Field("adaptive_rate") boolean adaptive_rate,
    @Field("rho") double rho,
    @Field("epsilon") double epsilon,
    @Field("rate") double rate,
    @Field("rate_annealing") double rate_annealing,
    @Field("rate_decay") double rate_decay,
    @Field("momentum_start") double momentum_start,
    @Field("momentum_ramp") double momentum_ramp,
    @Field("momentum_stable") double momentum_stable,
    @Field("nesterov_accelerated_gradient") boolean nesterov_accelerated_gradient,
    @Field("input_dropout_ratio") double input_dropout_ratio,
    @Field("hidden_dropout_ratios") double[] hidden_dropout_ratios,
    @Field("l1") double l1,
    @Field("l2") double l2,
    @Field("max_w2") float max_w2,
    @Field("initial_weight_distribution") DeepLearningInitialWeightDistribution initial_weight_distribution,
    @Field("initial_weight_scale") double initial_weight_scale,
    @Field("initial_weights") String[] initial_weights,
    @Field("initial_biases") String[] initial_biases,
    @Field("loss") DeepLearningLoss loss,
    @Field("score_interval") double score_interval,
    @Field("score_training_samples") long score_training_samples,
    @Field("score_validation_samples") long score_validation_samples,
    @Field("score_duty_cycle") double score_duty_cycle,
    @Field("classification_stop") double classification_stop,
    @Field("regression_stop") double regression_stop,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("score_validation_sampling") DeepLearningClassSamplingMethod score_validation_sampling,
    @Field("overwrite_with_best_model") boolean overwrite_with_best_model,
    @Field("autoencoder") boolean autoencoder,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("standardize") boolean standardize,
    @Field("diagnostics") boolean diagnostics,
    @Field("variable_importances") boolean variable_importances,
    @Field("fast_mode") boolean fast_mode,
    @Field("force_load_balance") boolean force_load_balance,
    @Field("replicate_training_data") boolean replicate_training_data,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("shuffle_training_data") boolean shuffle_training_data,
    @Field("missing_values_handling") DeepLearningMissingValuesHandling missing_values_handling,
    @Field("sparse") boolean sparse,
    @Field("col_major") boolean col_major,
    @Field("average_activation") double average_activation,
    @Field("sparsity_beta") double sparsity_beta,
    @Field("max_categorical_features") int max_categorical_features,
    @Field("reproducible") boolean reproducible,
    @Field("export_weights_and_biases") boolean export_weights_and_biases,
    @Field("mini_batch_size") int mini_batch_size,
    @Field("elastic_averaging") boolean elastic_averaging,
    @Field("elastic_averaging_moving_rate") double elastic_averaging_moving_rate,
    @Field("elastic_averaging_regularization") double elastic_averaging_regularization,
    @Field("pretrained_autoencoder") String pretrained_autoencoder,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/deeplearning")
  Call trainDeeplearning();

  /** 
   * Resume grid search for DeepLearning model.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs.
   *   @param activation Activation function.
   *   @param hidden Hidden layer sizes (e.g. [100, 100]).
   *   @param epochs How many times the dataset should be iterated (streamed), can be fractional.
   *   @param train_samples_per_iteration Number of training samples (globally) per MapReduce iteration. Special values
   *                                      are 0: one epoch, -1: all available data (e.g., replicated training data), -2:
   *                                      automatic.
   *   @param target_ratio_comm_to_comp Target ratio of communication overhead to computation. Only for multi-node
   *                                    operation and train_samples_per_iteration = -2 (auto-tuning).
   *   @param seed Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
   *   @param adaptive_rate Adaptive learning rate.
   *   @param rho Adaptive learning rate time decay factor (similarity to prior updates).
   *   @param epsilon Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
   *   @param rate Learning rate (higher => less stable, lower => slower convergence).
   *   @param rate_annealing Learning rate annealing: rate / (1 + rate_annealing * samples).
   *   @param rate_decay Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1).
   *   @param momentum_start Initial momentum at the beginning of training (try 0.5).
   *   @param momentum_ramp Number of training samples for which momentum increases.
   *   @param momentum_stable Final momentum after the ramp is over (try 0.99).
   *   @param nesterov_accelerated_gradient Use Nesterov accelerated gradient (recommended).
   *   @param input_dropout_ratio Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
   *   @param hidden_dropout_ratios Hidden layer dropout ratios (can improve generalization), specify one value per
   *                                hidden layer, defaults to 0.5.
   *   @param l1 L1 regularization (can add stability and improve generalization, causes many weights to become 0).
   *   @param l2 L2 regularization (can add stability and improve generalization, causes many weights to be small.
   *   @param max_w2 Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
   *   @param initial_weight_distribution Initial weight distribution.
   *   @param initial_weight_scale Uniform: -value...value, Normal: stddev.
   *   @param initial_weights A list of H2OFrame ids to initialize the weight matrices of this model with.
   *   @param initial_biases A list of H2OFrame ids to initialize the bias vectors of this model with.
   *   @param loss Loss function.
   *   @param score_interval Shortest time interval (in seconds) between model scoring.
   *   @param score_training_samples Number of training set samples for scoring (0 for all).
   *   @param score_validation_samples Number of validation set samples for scoring (0 for all).
   *   @param score_duty_cycle Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
   *   @param classification_stop Stopping criterion for classification error fraction on training data (-1 to disable).
   *   @param regression_stop Stopping criterion for regression error (MSE) on training data (-1 to disable).
   *   @param quiet_mode Enable quiet mode for less output to standard output.
   *   @param score_validation_sampling Method used to sample validation dataset for scoring.
   *   @param overwrite_with_best_model If enabled, override the final model with the best model found during training.
   *   @param autoencoder Auto-Encoder.
   *   @param use_all_factor_levels Use all factor levels of categorical variables. Otherwise, the first factor level is
   *                                omitted (without loss of accuracy). Useful for variable importances and auto-enabled
   *                                for autoencoder.
   *   @param standardize If enabled, automatically standardize the data. If disabled, the user must provide properly
   *                      scaled input data.
   *   @param diagnostics Enable diagnostics for hidden layers.
   *   @param variable_importances Compute variable importances for input features (Gedeon method) - can be slow for
   *                               large networks.
   *   @param fast_mode Enable fast mode (minor approximation in back-propagation).
   *   @param force_load_balance Force extra load balancing to increase training speed for small datasets (to keep all
   *                             cores busy).
   *   @param replicate_training_data Replicate the entire training dataset onto every node for faster training on small
   *                                  datasets.
   *   @param single_node_mode Run on a single node for fine-tuning of model parameters.
   *   @param shuffle_training_data Enable shuffling of training data (recommended if training data is replicated and
   *                                train_samples_per_iteration is close to #nodes x #rows, of if using
   *                                balance_classes).
   *   @param missing_values_handling Handling of missing values. Either MeanImputation or Skip.
   *   @param sparse Sparse data handling (more efficient for data with lots of 0 values).
   *   @param col_major #DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation,
   *                    but might slow down backpropagation.
   *   @param average_activation Average activation for sparse auto-encoder. #Experimental
   *   @param sparsity_beta Sparsity regularization. #Experimental
   *   @param max_categorical_features Max. number of categorical features, enforced via hashing. #Experimental
   *   @param reproducible Force reproducibility on small data (will be slow - only uses 1 thread).
   *   @param export_weights_and_biases Whether to export Neural Network weights and biases to H2O Frames.
   *   @param mini_batch_size Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
   *   @param elastic_averaging Elastic averaging between compute nodes can improve distributed model convergence.
   *                            #Experimental
   *   @param elastic_averaging_moving_rate Elastic averaging moving rate (only if elastic averaging is enabled).
   *   @param elastic_averaging_regularization Elastic averaging regularization strength (only if elastic averaging is
   *                                           enabled).
   *   @param pretrained_autoencoder Pretrained autoencoder model to initialize this model with.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/deeplearning/resume")
  Call resumeDeeplearning(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("activation") DeepLearningActivation activation,
    @Field("hidden") int[] hidden,
    @Field("epochs") double epochs,
    @Field("train_samples_per_iteration") long train_samples_per_iteration,
    @Field("target_ratio_comm_to_comp") double target_ratio_comm_to_comp,
    @Field("seed") long seed,
    @Field("adaptive_rate") boolean adaptive_rate,
    @Field("rho") double rho,
    @Field("epsilon") double epsilon,
    @Field("rate") double rate,
    @Field("rate_annealing") double rate_annealing,
    @Field("rate_decay") double rate_decay,
    @Field("momentum_start") double momentum_start,
    @Field("momentum_ramp") double momentum_ramp,
    @Field("momentum_stable") double momentum_stable,
    @Field("nesterov_accelerated_gradient") boolean nesterov_accelerated_gradient,
    @Field("input_dropout_ratio") double input_dropout_ratio,
    @Field("hidden_dropout_ratios") double[] hidden_dropout_ratios,
    @Field("l1") double l1,
    @Field("l2") double l2,
    @Field("max_w2") float max_w2,
    @Field("initial_weight_distribution") DeepLearningInitialWeightDistribution initial_weight_distribution,
    @Field("initial_weight_scale") double initial_weight_scale,
    @Field("initial_weights") String[] initial_weights,
    @Field("initial_biases") String[] initial_biases,
    @Field("loss") DeepLearningLoss loss,
    @Field("score_interval") double score_interval,
    @Field("score_training_samples") long score_training_samples,
    @Field("score_validation_samples") long score_validation_samples,
    @Field("score_duty_cycle") double score_duty_cycle,
    @Field("classification_stop") double classification_stop,
    @Field("regression_stop") double regression_stop,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("score_validation_sampling") DeepLearningClassSamplingMethod score_validation_sampling,
    @Field("overwrite_with_best_model") boolean overwrite_with_best_model,
    @Field("autoencoder") boolean autoencoder,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("standardize") boolean standardize,
    @Field("diagnostics") boolean diagnostics,
    @Field("variable_importances") boolean variable_importances,
    @Field("fast_mode") boolean fast_mode,
    @Field("force_load_balance") boolean force_load_balance,
    @Field("replicate_training_data") boolean replicate_training_data,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("shuffle_training_data") boolean shuffle_training_data,
    @Field("missing_values_handling") DeepLearningMissingValuesHandling missing_values_handling,
    @Field("sparse") boolean sparse,
    @Field("col_major") boolean col_major,
    @Field("average_activation") double average_activation,
    @Field("sparsity_beta") double sparsity_beta,
    @Field("max_categorical_features") int max_categorical_features,
    @Field("reproducible") boolean reproducible,
    @Field("export_weights_and_biases") boolean export_weights_and_biases,
    @Field("mini_batch_size") int mini_batch_size,
    @Field("elastic_averaging") boolean elastic_averaging,
    @Field("elastic_averaging_moving_rate") double elastic_averaging_moving_rate,
    @Field("elastic_averaging_regularization") double elastic_averaging_regularization,
    @Field("pretrained_autoencoder") String pretrained_autoencoder,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/deeplearning/resume")
  Call resumeDeeplearning();

  /** 
   * Run grid search for GLM model.
   *   @param seed Seed for pseudo random number generator (if applicable).
   *   @param family Family. Use binomial for classification with logistic regression, others are for regression
   *                 problems.
   *   @param rand_family Random Component Family array.  One for each random component. Only support gaussian for now.
   *   @param tweedie_variance_power Tweedie variance power
   *   @param dispersion_learning_rate Dispersion learning rate is only valid for tweedie family dispersion parameter
   *                                   estimation using ml. It must be > 0.  This controls how much the dispersion
   *                                   parameter estimate is to be changed when the calculated loglikelihood actually
   *                                   decreases with the new dispersion.  In this case, instead of setting new
   *                                   dispersion = dispersion + change, we set new dispersion = dispersion +
   *                                   dispersion_learning_rate * change. Defaults to 0.5.
   *   @param tweedie_link_power Tweedie link power.
   *   @param theta Theta
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   *   @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                   set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                   otherwise it is set to 100.
   *   @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
   *   @param standardize Standardize numeric columns to have zero mean and unit variance.
   *   @param cold_start Only applicable to multiple alpha/lambda values.  If false, build the next model for next set
   *                     of alpha/lambda values starting from the values provided by current model.  If true will start
   *                     GLM model from scratch.
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
   *                    in the dataset.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative.
   *   @param max_iterations Maximum number of iterations.  Value should >=1.  A value of 0 is only set when only the
   *                         model coefficient names and model coefficient dimensions are needed.
   *   @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta esilon. ONLY applies to IRLSM
   *                       solver.
   *   @param objective_epsilon Converge if  objective value changes less than this. Default (of -1.0) indicates: If
   *                            lambda_search is set to True the value of objective_epsilon is set to .0001. If the
   *                            lambda_search is set to False and lambda is equal to zero, the value of
   *                            objective_epsilon is set to .000001, for any other value of lambda the default value of
   *                            objective_epsilon is set to .0001.
   *   @param gradient_epsilon Converge if  objective changes less (using L-infinity norm) than this, ONLY applies to
   *                           L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
   *                           is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
   *                           the default value is .0001. If lambda_search is set to True, the conditional values above
   *                           are 1E-8 and 1E-6 respectively.
   *   @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.
   *   @param link Link function.
   *   @param dispersion_parameter_method Method used to estimate the dispersion parameter for Tweedie, Gamma and
   *                                      Negative Binomial only.
   *   @param rand_link Link function array for random component in HGLM.
   *   @param startval double array to initialize fixed and random coefficients for HGLM, coefficients for GLM.  If
   *                   standardize is true, the standardized coefficients should be used.  Otherwise, use the regular
   *                   coefficients.
   *   @param random_columns random columns indices for HGLM.
   *   @param calc_like if true, will return likelihood function value.
   *   @param generate_variable_inflation_factors if true, will generate variable inflation factors for numerical
   *                                              predictors.  Default to false.
   *   @param intercept Include constant term in the model
   *   @param build_null_model If set, will build a model with only the intercept.  Default to false.
   *   @param fix_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM.  If set, will use the
   *                                   dispsersion parameter in init_dispersion_parameter as the standard error and use
   *                                   it to calculate the p-values. Default to false.
   *   @param init_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM.  Store the initial value
   *                                    of dispersion parameter.  If fix_dispersion_parameter is set, this value will be
   *                                    used in the calculation of p-values.
   *   @param HGLM If set to true, will return HGLM model.  Otherwise, normal GLM model will be returned.
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *                           lambda that drives all coefficients to zero).  Default indicates: if the number of
   *                           observations is greater than the number of variables, then lambda_min_ratio is set to
   *                           0.0001; if the number of observations is less than the number of variables, then
   *                           lambda_min_ratio is set to 0.01.
   *   @param beta_constraints Beta constraints
   *   @param linear_constraints Linear constraints: used to specify linear constraints involving more than one
   *                             coefficients in standard form.  It is only supported for solver IRLSM.  It contains
   *                             four columns: names (strings for coefficient names or constant), values, types (
   *                             strings of 'Equal' or 'LessThanEqual'), constraint_numbers (0 for first linear
   *                             constraint, 1 for second linear constraint, ...).
   *   @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *                                to prevent expensive model building with many predictors. Default indicates: If the
   *                                IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *                                is set to 100000000.
   *   @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                       for the list.
   *   @param interaction_pairs A list of pairwise (first order) column interactions.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs.
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver.
   *   @param fix_tweedie_variance_power If true, will fix tweedie variance power value to the value set in
   *                                     tweedie_variance_power.
   *   @param remove_collinear_columns In case of linearly dependent columns, remove the dependent columns.
   *   @param dispersion_epsilon If changes in dispersion parameter estimation or loglikelihood value is smaller than
   *                             dispersion_epsilon, will break out of the dispersion parameter estimation loop using
   *                             maximum likelihood.
   *   @param tweedie_epsilon In estimating tweedie dispersion parameter using maximum likelihood, this is used to
   *                          choose the lower and upper indices in the approximating of the infinite series summation.
   *   @param max_iterations_dispersion Control the maximum number of iterations in the dispersion parameter estimation
   *                                    loop using maximum likelihood.
   *   @param generate_scoring_history If set to true, will generate scoring history for GLM.  This may significantly
   *                                   slow down the algo.
   *   @param init_optimal_glm If true, will initialize coefficients with values derived from GLM runs without linear
   *                           constraints.  Only available for linear constraints.
   *   @param separate_linear_beta If true, will keep the beta constraints and linear constraints separate.  After new
   *                               coefficients are found, first beta constraints will be applied followed by the
   *                               application of linear constraints.  Note that the beta constraints in this case will
   *                               not be part of the objective function.  If false, will combine the beta and linear
   *                               constraints.
   *   @param constraint_eta0 For constrained GLM only.  It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).
   *   @param constraint_tau For constrained GLM only.  It affects the setting of c_k+1=tau*c_k.
   *   @param constraint_alpha For constrained GLM only.  It affects the setting of  eta_k = eta_0/pow(c_0, alpha).
   *   @param constraint_beta For constrained GLM only.  It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).
   *   @param constraint_c0 For constrained GLM only.  It affects the initial setting of epsilon_k = 1/c_0.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/glm")
  Call trainGlm(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("rand_family") GLMFamily[] rand_family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("dispersion_learning_rate") double dispersion_learning_rate,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("influence") GLMInfluence influence,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("dispersion_parameter_method") GLMDispersionMethod dispersion_parameter_method,
    @Field("rand_link") GLMLink[] rand_link,
    @Field("startval") double[] startval,
    @Field("random_columns") int[] random_columns,
    @Field("calc_like") boolean calc_like,
    @Field("generate_variable_inflation_factors") boolean generate_variable_inflation_factors,
    @Field("intercept") boolean intercept,
    @Field("build_null_model") boolean build_null_model,
    @Field("fix_dispersion_parameter") boolean fix_dispersion_parameter,
    @Field("init_dispersion_parameter") double init_dispersion_parameter,
    @Field("HGLM") boolean HGLM,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("linear_constraints") String linear_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("fix_tweedie_variance_power") boolean fix_tweedie_variance_power,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("dispersion_epsilon") double dispersion_epsilon,
    @Field("tweedie_epsilon") double tweedie_epsilon,
    @Field("max_iterations_dispersion") int max_iterations_dispersion,
    @Field("generate_scoring_history") boolean generate_scoring_history,
    @Field("init_optimal_glm") boolean init_optimal_glm,
    @Field("separate_linear_beta") boolean separate_linear_beta,
    @Field("constraint_eta0") double constraint_eta0,
    @Field("constraint_tau") double constraint_tau,
    @Field("constraint_alpha") double constraint_alpha,
    @Field("constraint_beta") double constraint_beta,
    @Field("constraint_c0") double constraint_c0,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/glm")
  Call trainGlm();

  /** 
   * Resume grid search for GLM model.
   *   @param seed Seed for pseudo random number generator (if applicable).
   *   @param family Family. Use binomial for classification with logistic regression, others are for regression
   *                 problems.
   *   @param rand_family Random Component Family array.  One for each random component. Only support gaussian for now.
   *   @param tweedie_variance_power Tweedie variance power
   *   @param dispersion_learning_rate Dispersion learning rate is only valid for tweedie family dispersion parameter
   *                                   estimation using ml. It must be > 0.  This controls how much the dispersion
   *                                   parameter estimate is to be changed when the calculated loglikelihood actually
   *                                   decreases with the new dispersion.  In this case, instead of setting new
   *                                   dispersion = dispersion + change, we set new dispersion = dispersion +
   *                                   dispersion_learning_rate * change. Defaults to 0.5.
   *   @param tweedie_link_power Tweedie link power.
   *   @param theta Theta
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   *   @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                   set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                   otherwise it is set to 100.
   *   @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
   *   @param standardize Standardize numeric columns to have zero mean and unit variance.
   *   @param cold_start Only applicable to multiple alpha/lambda values.  If false, build the next model for next set
   *                     of alpha/lambda values starting from the values provided by current model.  If true will start
   *                     GLM model from scratch.
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
   *                    in the dataset.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative.
   *   @param max_iterations Maximum number of iterations.  Value should >=1.  A value of 0 is only set when only the
   *                         model coefficient names and model coefficient dimensions are needed.
   *   @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta esilon. ONLY applies to IRLSM
   *                       solver.
   *   @param objective_epsilon Converge if  objective value changes less than this. Default (of -1.0) indicates: If
   *                            lambda_search is set to True the value of objective_epsilon is set to .0001. If the
   *                            lambda_search is set to False and lambda is equal to zero, the value of
   *                            objective_epsilon is set to .000001, for any other value of lambda the default value of
   *                            objective_epsilon is set to .0001.
   *   @param gradient_epsilon Converge if  objective changes less (using L-infinity norm) than this, ONLY applies to
   *                           L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
   *                           is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
   *                           the default value is .0001. If lambda_search is set to True, the conditional values above
   *                           are 1E-8 and 1E-6 respectively.
   *   @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.
   *   @param link Link function.
   *   @param dispersion_parameter_method Method used to estimate the dispersion parameter for Tweedie, Gamma and
   *                                      Negative Binomial only.
   *   @param rand_link Link function array for random component in HGLM.
   *   @param startval double array to initialize fixed and random coefficients for HGLM, coefficients for GLM.  If
   *                   standardize is true, the standardized coefficients should be used.  Otherwise, use the regular
   *                   coefficients.
   *   @param random_columns random columns indices for HGLM.
   *   @param calc_like if true, will return likelihood function value.
   *   @param generate_variable_inflation_factors if true, will generate variable inflation factors for numerical
   *                                              predictors.  Default to false.
   *   @param intercept Include constant term in the model
   *   @param build_null_model If set, will build a model with only the intercept.  Default to false.
   *   @param fix_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM.  If set, will use the
   *                                   dispsersion parameter in init_dispersion_parameter as the standard error and use
   *                                   it to calculate the p-values. Default to false.
   *   @param init_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM.  Store the initial value
   *                                    of dispersion parameter.  If fix_dispersion_parameter is set, this value will be
   *                                    used in the calculation of p-values.
   *   @param HGLM If set to true, will return HGLM model.  Otherwise, normal GLM model will be returned.
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *                           lambda that drives all coefficients to zero).  Default indicates: if the number of
   *                           observations is greater than the number of variables, then lambda_min_ratio is set to
   *                           0.0001; if the number of observations is less than the number of variables, then
   *                           lambda_min_ratio is set to 0.01.
   *   @param beta_constraints Beta constraints
   *   @param linear_constraints Linear constraints: used to specify linear constraints involving more than one
   *                             coefficients in standard form.  It is only supported for solver IRLSM.  It contains
   *                             four columns: names (strings for coefficient names or constant), values, types (
   *                             strings of 'Equal' or 'LessThanEqual'), constraint_numbers (0 for first linear
   *                             constraint, 1 for second linear constraint, ...).
   *   @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *                                to prevent expensive model building with many predictors. Default indicates: If the
   *                                IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *                                is set to 100000000.
   *   @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                       for the list.
   *   @param interaction_pairs A list of pairwise (first order) column interactions.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs.
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver.
   *   @param fix_tweedie_variance_power If true, will fix tweedie variance power value to the value set in
   *                                     tweedie_variance_power.
   *   @param remove_collinear_columns In case of linearly dependent columns, remove the dependent columns.
   *   @param dispersion_epsilon If changes in dispersion parameter estimation or loglikelihood value is smaller than
   *                             dispersion_epsilon, will break out of the dispersion parameter estimation loop using
   *                             maximum likelihood.
   *   @param tweedie_epsilon In estimating tweedie dispersion parameter using maximum likelihood, this is used to
   *                          choose the lower and upper indices in the approximating of the infinite series summation.
   *   @param max_iterations_dispersion Control the maximum number of iterations in the dispersion parameter estimation
   *                                    loop using maximum likelihood.
   *   @param generate_scoring_history If set to true, will generate scoring history for GLM.  This may significantly
   *                                   slow down the algo.
   *   @param init_optimal_glm If true, will initialize coefficients with values derived from GLM runs without linear
   *                           constraints.  Only available for linear constraints.
   *   @param separate_linear_beta If true, will keep the beta constraints and linear constraints separate.  After new
   *                               coefficients are found, first beta constraints will be applied followed by the
   *                               application of linear constraints.  Note that the beta constraints in this case will
   *                               not be part of the objective function.  If false, will combine the beta and linear
   *                               constraints.
   *   @param constraint_eta0 For constrained GLM only.  It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).
   *   @param constraint_tau For constrained GLM only.  It affects the setting of c_k+1=tau*c_k.
   *   @param constraint_alpha For constrained GLM only.  It affects the setting of  eta_k = eta_0/pow(c_0, alpha).
   *   @param constraint_beta For constrained GLM only.  It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).
   *   @param constraint_c0 For constrained GLM only.  It affects the initial setting of epsilon_k = 1/c_0.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/glm/resume")
  Call resumeGlm(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("rand_family") GLMFamily[] rand_family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("dispersion_learning_rate") double dispersion_learning_rate,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("influence") GLMInfluence influence,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("dispersion_parameter_method") GLMDispersionMethod dispersion_parameter_method,
    @Field("rand_link") GLMLink[] rand_link,
    @Field("startval") double[] startval,
    @Field("random_columns") int[] random_columns,
    @Field("calc_like") boolean calc_like,
    @Field("generate_variable_inflation_factors") boolean generate_variable_inflation_factors,
    @Field("intercept") boolean intercept,
    @Field("build_null_model") boolean build_null_model,
    @Field("fix_dispersion_parameter") boolean fix_dispersion_parameter,
    @Field("init_dispersion_parameter") double init_dispersion_parameter,
    @Field("HGLM") boolean HGLM,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("linear_constraints") String linear_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("fix_tweedie_variance_power") boolean fix_tweedie_variance_power,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("dispersion_epsilon") double dispersion_epsilon,
    @Field("tweedie_epsilon") double tweedie_epsilon,
    @Field("max_iterations_dispersion") int max_iterations_dispersion,
    @Field("generate_scoring_history") boolean generate_scoring_history,
    @Field("init_optimal_glm") boolean init_optimal_glm,
    @Field("separate_linear_beta") boolean separate_linear_beta,
    @Field("constraint_eta0") double constraint_eta0,
    @Field("constraint_tau") double constraint_tau,
    @Field("constraint_alpha") double constraint_alpha,
    @Field("constraint_beta") double constraint_beta,
    @Field("constraint_c0") double constraint_c0,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/glm/resume")
  Call resumeGlm();

  /** 
   * Run grid search for GLRM model.
   *   @param transform Transformation of training data
   *   @param k Rank of matrix approximation
   *   @param loss Numeric loss function
   *   @param multi_loss Categorical loss function
   *   @param loss_by_col Loss function by column (override)
   *   @param loss_by_col_idx Loss function by column index (override)
   *   @param period Length of period (only used with periodic loss function)
   *   @param regularization_x Regularization function for X matrix
   *   @param regularization_y Regularization function for Y matrix
   *   @param gamma_x Regularization weight on X matrix
   *   @param gamma_y Regularization weight on Y matrix
   *   @param max_iterations Maximum number of iterations
   *   @param max_updates Maximum number of updates, defaults to 2*max_iterations
   *   @param init_step_size Initial step size
   *   @param min_step_size Minimum step size
   *   @param seed RNG seed for initialization
   *   @param init Initialization mode
   *   @param svd_method Method for computing SVD during initialization (Caution: Randomized is currently experimental
   *                     and unstable)
   *   @param user_y User-specified initial Y
   *   @param user_x User-specified initial X
   *   @param loading_name [Deprecated] Use representation_name instead.  Frame key to save resulting X.
   *   @param representation_name Frame key to save resulting X
   *   @param expand_user_y Expand categorical columns in user-specified initial Y
   *   @param impute_original Reconstruct original training data by reversing transform
   *   @param recover_svd Recover singular values and eigenvectors of XY
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/glrm")
  Call trainGlrm(
    @Field("transform") DataInfoTransformType transform,
    @Field("k") int k,
    @Field("loss") GenmodelalgosglrmGlrmLoss loss,
    @Field("multi_loss") GenmodelalgosglrmGlrmLoss multi_loss,
    @Field("loss_by_col") GenmodelalgosglrmGlrmLoss[] loss_by_col,
    @Field("loss_by_col_idx") int[] loss_by_col_idx,
    @Field("period") int period,
    @Field("regularization_x") GenmodelalgosglrmGlrmRegularizer regularization_x,
    @Field("regularization_y") GenmodelalgosglrmGlrmRegularizer regularization_y,
    @Field("gamma_x") double gamma_x,
    @Field("gamma_y") double gamma_y,
    @Field("max_iterations") int max_iterations,
    @Field("max_updates") int max_updates,
    @Field("init_step_size") double init_step_size,
    @Field("min_step_size") double min_step_size,
    @Field("seed") long seed,
    @Field("init") GenmodelalgosglrmGlrmInitialization init,
    @Field("svd_method") SVDMethod svd_method,
    @Field("user_y") String user_y,
    @Field("user_x") String user_x,
    @Field("loading_name") String loading_name,
    @Field("representation_name") String representation_name,
    @Field("expand_user_y") boolean expand_user_y,
    @Field("impute_original") boolean impute_original,
    @Field("recover_svd") boolean recover_svd,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/glrm")
  Call trainGlrm(@Field("k") int k);

  /** 
   * Resume grid search for GLRM model.
   *   @param transform Transformation of training data
   *   @param k Rank of matrix approximation
   *   @param loss Numeric loss function
   *   @param multi_loss Categorical loss function
   *   @param loss_by_col Loss function by column (override)
   *   @param loss_by_col_idx Loss function by column index (override)
   *   @param period Length of period (only used with periodic loss function)
   *   @param regularization_x Regularization function for X matrix
   *   @param regularization_y Regularization function for Y matrix
   *   @param gamma_x Regularization weight on X matrix
   *   @param gamma_y Regularization weight on Y matrix
   *   @param max_iterations Maximum number of iterations
   *   @param max_updates Maximum number of updates, defaults to 2*max_iterations
   *   @param init_step_size Initial step size
   *   @param min_step_size Minimum step size
   *   @param seed RNG seed for initialization
   *   @param init Initialization mode
   *   @param svd_method Method for computing SVD during initialization (Caution: Randomized is currently experimental
   *                     and unstable)
   *   @param user_y User-specified initial Y
   *   @param user_x User-specified initial X
   *   @param loading_name [Deprecated] Use representation_name instead.  Frame key to save resulting X.
   *   @param representation_name Frame key to save resulting X
   *   @param expand_user_y Expand categorical columns in user-specified initial Y
   *   @param impute_original Reconstruct original training data by reversing transform
   *   @param recover_svd Recover singular values and eigenvectors of XY
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/glrm/resume")
  Call resumeGlrm(
    @Field("transform") DataInfoTransformType transform,
    @Field("k") int k,
    @Field("loss") GenmodelalgosglrmGlrmLoss loss,
    @Field("multi_loss") GenmodelalgosglrmGlrmLoss multi_loss,
    @Field("loss_by_col") GenmodelalgosglrmGlrmLoss[] loss_by_col,
    @Field("loss_by_col_idx") int[] loss_by_col_idx,
    @Field("period") int period,
    @Field("regularization_x") GenmodelalgosglrmGlrmRegularizer regularization_x,
    @Field("regularization_y") GenmodelalgosglrmGlrmRegularizer regularization_y,
    @Field("gamma_x") double gamma_x,
    @Field("gamma_y") double gamma_y,
    @Field("max_iterations") int max_iterations,
    @Field("max_updates") int max_updates,
    @Field("init_step_size") double init_step_size,
    @Field("min_step_size") double min_step_size,
    @Field("seed") long seed,
    @Field("init") GenmodelalgosglrmGlrmInitialization init,
    @Field("svd_method") SVDMethod svd_method,
    @Field("user_y") String user_y,
    @Field("user_x") String user_x,
    @Field("loading_name") String loading_name,
    @Field("representation_name") String representation_name,
    @Field("expand_user_y") boolean expand_user_y,
    @Field("impute_original") boolean impute_original,
    @Field("recover_svd") boolean recover_svd,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/glrm/resume")
  Call resumeGlrm(@Field("k") int k);

  /** 
   * Run grid search for KMeans model.
   *   @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster
   *                      center. The user-specified points must have the same number of columns as the training
   *                      observations. The number of rows must equal the number of clusters
   *   @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds
   *                         iteration)
   *   @param standardize Standardize columns before computing distances
   *   @param seed RNG Seed
   *   @param init Initialization mode
   *   @param estimate_k Whether to estimate the number of clusters (<=k) iteratively and deterministically.
   *   @param cluster_size_constraints An array specifying the minimum number of points that should be in each cluster.
   *                                   The length of the constraints array has to be the same as the number of clusters.
   *   @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it
   *            will find up to k centroids.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/kmeans")
  Call trainKmeans(
    @Field("user_points") String user_points,
    @Field("max_iterations") int max_iterations,
    @Field("standardize") boolean standardize,
    @Field("seed") long seed,
    @Field("init") KMeansInitialization init,
    @Field("estimate_k") boolean estimate_k,
    @Field("cluster_size_constraints") int[] cluster_size_constraints,
    @Field("k") int k,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/kmeans")
  Call trainKmeans();

  /** 
   * Resume grid search for KMeans model.
   *   @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster
   *                      center. The user-specified points must have the same number of columns as the training
   *                      observations. The number of rows must equal the number of clusters
   *   @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds
   *                         iteration)
   *   @param standardize Standardize columns before computing distances
   *   @param seed RNG Seed
   *   @param init Initialization mode
   *   @param estimate_k Whether to estimate the number of clusters (<=k) iteratively and deterministically.
   *   @param cluster_size_constraints An array specifying the minimum number of points that should be in each cluster.
   *                                   The length of the constraints array has to be the same as the number of clusters.
   *   @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it
   *            will find up to k centroids.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/kmeans/resume")
  Call resumeKmeans(
    @Field("user_points") String user_points,
    @Field("max_iterations") int max_iterations,
    @Field("standardize") boolean standardize,
    @Field("seed") long seed,
    @Field("init") KMeansInitialization init,
    @Field("estimate_k") boolean estimate_k,
    @Field("cluster_size_constraints") int[] cluster_size_constraints,
    @Field("k") int k,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/kmeans/resume")
  Call resumeKmeans();

  /** 
   * Run grid search for NaiveBayes model.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param laplace Laplace smoothing parameter
   *   @param min_sdev Min. standard deviation to use for observations with not enough data
   *   @param eps_sdev Cutoff below which standard deviation is replaced with min_sdev
   *   @param min_prob Min. probability to use for observations with not enough data
   *   @param eps_prob Cutoff below which probability is replaced with min_prob
   *   @param compute_metrics Compute metrics on training data
   *   @param seed Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random"
   *               or "AUTO")
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/naivebayes")
  Call trainNaivebayes(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("laplace") double laplace,
    @Field("min_sdev") double min_sdev,
    @Field("eps_sdev") double eps_sdev,
    @Field("min_prob") double min_prob,
    @Field("eps_prob") double eps_prob,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/naivebayes")
  Call trainNaivebayes();

  /** 
   * Resume grid search for NaiveBayes model.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param laplace Laplace smoothing parameter
   *   @param min_sdev Min. standard deviation to use for observations with not enough data
   *   @param eps_sdev Cutoff below which standard deviation is replaced with min_sdev
   *   @param min_prob Min. probability to use for observations with not enough data
   *   @param eps_prob Cutoff below which probability is replaced with min_prob
   *   @param compute_metrics Compute metrics on training data
   *   @param seed Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random"
   *               or "AUTO")
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/naivebayes/resume")
  Call resumeNaivebayes(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("laplace") double laplace,
    @Field("min_sdev") double min_sdev,
    @Field("eps_sdev") double eps_sdev,
    @Field("min_prob") double min_prob,
    @Field("eps_prob") double eps_prob,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/naivebayes/resume")
  Call resumeNaivebayes();

  /** 
   * Run grid search for PCA model.
   *   @param transform Transformation of training data
   *   @param pca_method Specify the algorithm to use for computing the principal components: GramSVD - uses a
   *                     distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD
   *                     using the power iteration method (experimental); Randomized - uses randomized subspace
   *                     iteration method; GLRM - fits a generalized low-rank model with L2 loss function and no
   *                     regularization and solves for the SVD using local matrix algebra (experimental)
   *   @param pca_impl Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
   *                   eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue
   *                   decompositions for symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value
   *                   decompositions for dense matrix using MTJ; JAMA - eigenvalue decompositions for dense matrix
   *                   using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
   *                   https://github.com/fommil/matrix-toolkits-java/
   *   @param k Rank of matrix approximation
   *   @param max_iterations Maximum training iterations
   *   @param seed RNG seed for initialization
   *   @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   *   @param compute_metrics Whether to compute metrics on the training data
   *   @param impute_missing Whether to impute missing entries with the column mean
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/pca")
  Call trainPca(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("pca_impl") PCAImplementation pca_impl,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("impute_missing") boolean impute_missing,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/pca")
  Call trainPca(@Field("k") int k);

  /** 
   * Resume grid search for PCA model.
   *   @param transform Transformation of training data
   *   @param pca_method Specify the algorithm to use for computing the principal components: GramSVD - uses a
   *                     distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD
   *                     using the power iteration method (experimental); Randomized - uses randomized subspace
   *                     iteration method; GLRM - fits a generalized low-rank model with L2 loss function and no
   *                     regularization and solves for the SVD using local matrix algebra (experimental)
   *   @param pca_impl Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
   *                   eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue
   *                   decompositions for symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value
   *                   decompositions for dense matrix using MTJ; JAMA - eigenvalue decompositions for dense matrix
   *                   using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
   *                   https://github.com/fommil/matrix-toolkits-java/
   *   @param k Rank of matrix approximation
   *   @param max_iterations Maximum training iterations
   *   @param seed RNG seed for initialization
   *   @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   *   @param compute_metrics Whether to compute metrics on the training data
   *   @param impute_missing Whether to impute missing entries with the column mean
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/pca/resume")
  Call resumePca(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("pca_impl") PCAImplementation pca_impl,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("impute_missing") boolean impute_missing,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/pca/resume")
  Call resumePca(@Field("k") int k);

  /** 
   * Run grid search for SVD model.
   *   @param transform Transformation of training data
   *   @param svd_method Method for computing SVD (Caution: Randomized is currently experimental and unstable)
   *   @param nv Number of right singular vectors
   *   @param max_iterations Maximum iterations
   *   @param seed RNG seed for k-means++ initialization
   *   @param keep_u Save left singular vectors?
   *   @param u_name Frame key to save left singular vectors
   *   @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/svd")
  Call trainSvd(
    @Field("transform") DataInfoTransformType transform,
    @Field("svd_method") SVDMethod svd_method,
    @Field("nv") int nv,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("keep_u") boolean keep_u,
    @Field("u_name") String u_name,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/svd")
  Call trainSvd();

  /** 
   * Resume grid search for SVD model.
   *   @param transform Transformation of training data
   *   @param svd_method Method for computing SVD (Caution: Randomized is currently experimental and unstable)
   *   @param nv Number of right singular vectors
   *   @param max_iterations Maximum iterations
   *   @param seed RNG seed for k-means++ initialization
   *   @param keep_u Save left singular vectors?
   *   @param u_name Frame key to save left singular vectors
   *   @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/svd/resume")
  Call resumeSvd(
    @Field("transform") DataInfoTransformType transform,
    @Field("svd_method") SVDMethod svd_method,
    @Field("nv") int nv,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("keep_u") boolean keep_u,
    @Field("u_name") String u_name,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/svd/resume")
  Call resumeSvd();

  /** 
   * Run grid search for DRF model.
   *   @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *                 for classification and p/3 for regression (where p is the # of predictors
   *   @param binomial_double_trees For binary classification: Build 2x as many trees (one per class) - can lead to
   *                                higher accuracy.
   *   @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/drf")
  Call trainDrf(
    @Field("mtries") int mtries,
    @Field("binomial_double_trees") boolean binomial_double_trees,
    @Field("sample_rate") double sample_rate,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/drf")
  Call trainDrf();

  /** 
   * Resume grid search for DRF model.
   *   @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *                 for classification and p/3 for regression (where p is the # of predictors
   *   @param binomial_double_trees For binary classification: Build 2x as many trees (one per class) - can lead to
   *                                higher accuracy.
   *   @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/drf/resume")
  Call resumeDrf(
    @Field("mtries") int mtries,
    @Field("binomial_double_trees") boolean binomial_double_trees,
    @Field("sample_rate") double sample_rate,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/drf/resume")
  Call resumeDrf();

  /** 
   * Run grid search for GBM model.
   *   @param learn_rate Learning rate (from 0.0 to 1.0)
   *   @param learn_rate_annealing Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
   *   @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate Column sample rate (from 0.0 to 1.0)
   *   @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
   *                               constraint and -1 to specify a decreasing constraint.
   *   @param max_abs_leafnode_pred Maximum absolute value of a leaf node prediction
   *   @param pred_noise_bandwidth Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
   *                               predictions
   *   @param interaction_constraints A set of allowed column interactions.
   *   @param auto_rebalance Allow automatic rebalancing of training and validation datasets
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gbm")
  Call trainGbm(
    @Field("learn_rate") double learn_rate,
    @Field("learn_rate_annealing") double learn_rate_annealing,
    @Field("sample_rate") double sample_rate,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") double max_abs_leafnode_pred,
    @Field("pred_noise_bandwidth") double pred_noise_bandwidth,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("auto_rebalance") boolean auto_rebalance,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/gbm")
  Call trainGbm();

  /** 
   * Resume grid search for GBM model.
   *   @param learn_rate Learning rate (from 0.0 to 1.0)
   *   @param learn_rate_annealing Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
   *   @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate Column sample rate (from 0.0 to 1.0)
   *   @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
   *                               constraint and -1 to specify a decreasing constraint.
   *   @param max_abs_leafnode_pred Maximum absolute value of a leaf node prediction
   *   @param pred_noise_bandwidth Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
   *                               predictions
   *   @param interaction_constraints A set of allowed column interactions.
   *   @param auto_rebalance Allow automatic rebalancing of training and validation datasets
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gbm/resume")
  Call resumeGbm(
    @Field("learn_rate") double learn_rate,
    @Field("learn_rate_annealing") double learn_rate_annealing,
    @Field("sample_rate") double sample_rate,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") double max_abs_leafnode_pred,
    @Field("pred_noise_bandwidth") double pred_noise_bandwidth,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("auto_rebalance") boolean auto_rebalance,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/gbm/resume")
  Call resumeGbm();

  /** 
   * Run grid search for IsolationForest model.
   *   @param sample_size Number of randomly sampled observations used to train each Isolation Forest tree. Only one of
   *                      parameters sample_size and sample_rate should be defined. If sample_rate is defined,
   *                      sample_size will be ignored.
   *   @param sample_rate Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in
   *                      range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used
   *                      instead.
   *   @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults (number of
   *                 predictors)/3.
   *   @param contamination Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1)
   *                        the predict function will not mark observations as anomalies and only anomaly score will be
   *                        returned. Defaults to -1 (undefined).
   *   @param validation_response_column (experimental) Name of the response column in the validation frame. Response
   *                                     column should be binary and indicate not anomaly/anomaly.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/isolationforest")
  Call trainIsolationforest(
    @Field("sample_size") long sample_size,
    @Field("sample_rate") double sample_rate,
    @Field("mtries") int mtries,
    @Field("contamination") double contamination,
    @Field("validation_response_column") String validation_response_column,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/isolationforest")
  Call trainIsolationforest();

  /** 
   * Resume grid search for IsolationForest model.
   *   @param sample_size Number of randomly sampled observations used to train each Isolation Forest tree. Only one of
   *                      parameters sample_size and sample_rate should be defined. If sample_rate is defined,
   *                      sample_size will be ignored.
   *   @param sample_rate Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in
   *                      range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used
   *                      instead.
   *   @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults (number of
   *                 predictors)/3.
   *   @param contamination Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1)
   *                        the predict function will not mark observations as anomalies and only anomaly score will be
   *                        returned. Defaults to -1 (undefined).
   *   @param validation_response_column (experimental) Name of the response column in the validation frame. Response
   *                                     column should be binary and indicate not anomaly/anomaly.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/isolationforest/resume")
  Call resumeIsolationforest(
    @Field("sample_size") long sample_size,
    @Field("sample_rate") double sample_rate,
    @Field("mtries") int mtries,
    @Field("contamination") double contamination,
    @Field("validation_response_column") String validation_response_column,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/isolationforest/resume")
  Call resumeIsolationforest();

  /** 
   * Run grid search for ExtendedIsolationForest model.
   *   @param ntrees Number of Extended Isolation Forest trees.
   *   @param sample_size Number of randomly sampled observations used to train each Extended Isolation Forest tree.
   *   @param extension_level Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with
   *                          extension_Level = 0 behaves like Isolation Forest.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/extendedisolationforest")
  Call trainExtendedisolationforest(
    @Field("ntrees") int ntrees,
    @Field("sample_size") int sample_size,
    @Field("extension_level") int extension_level,
    @Field("seed") long seed,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/extendedisolationforest")
  Call trainExtendedisolationforest();

  /** 
   * Resume grid search for ExtendedIsolationForest model.
   *   @param ntrees Number of Extended Isolation Forest trees.
   *   @param sample_size Number of randomly sampled observations used to train each Extended Isolation Forest tree.
   *   @param extension_level Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with
   *                          extension_Level = 0 behaves like Isolation Forest.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/extendedisolationforest/resume")
  Call resumeExtendedisolationforest(
    @Field("ntrees") int ntrees,
    @Field("sample_size") int sample_size,
    @Field("extension_level") int extension_level,
    @Field("seed") long seed,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/extendedisolationforest/resume")
  Call resumeExtendedisolationforest();

  /** 
   * Run grid search for Aggregator model.
   *   @param transform Transformation of training data
   *   @param pca_method Method for computing PCA (Caution: GLRM is currently experimental and unstable)
   *   @param k Rank of matrix approximation
   *   @param max_iterations Maximum number of iterations for PCA
   *   @param target_num_exemplars Targeted number of exemplars
   *   @param rel_tol_num_exemplars Relative tolerance for number of exemplars (e.g, 0.5 is +/- 50 percents)
   *   @param seed RNG seed for initialization
   *   @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   *   @param save_mapping_frame Whether to export the mapping of the aggregated frame
   *   @param num_iteration_without_new_exemplar The number of iterations to run before aggregator exits if the number
   *                                             of exemplars collected didn't change
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/aggregator")
  Call trainAggregator(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("target_num_exemplars") int target_num_exemplars,
    @Field("rel_tol_num_exemplars") double rel_tol_num_exemplars,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("save_mapping_frame") boolean save_mapping_frame,
    @Field("num_iteration_without_new_exemplar") int num_iteration_without_new_exemplar,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/aggregator")
  Call trainAggregator();

  /** 
   * Resume grid search for Aggregator model.
   *   @param transform Transformation of training data
   *   @param pca_method Method for computing PCA (Caution: GLRM is currently experimental and unstable)
   *   @param k Rank of matrix approximation
   *   @param max_iterations Maximum number of iterations for PCA
   *   @param target_num_exemplars Targeted number of exemplars
   *   @param rel_tol_num_exemplars Relative tolerance for number of exemplars (e.g, 0.5 is +/- 50 percents)
   *   @param seed RNG seed for initialization
   *   @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   *   @param save_mapping_frame Whether to export the mapping of the aggregated frame
   *   @param num_iteration_without_new_exemplar The number of iterations to run before aggregator exits if the number
   *                                             of exemplars collected didn't change
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/aggregator/resume")
  Call resumeAggregator(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("target_num_exemplars") int target_num_exemplars,
    @Field("rel_tol_num_exemplars") double rel_tol_num_exemplars,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("save_mapping_frame") boolean save_mapping_frame,
    @Field("num_iteration_without_new_exemplar") int num_iteration_without_new_exemplar,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/aggregator/resume")
  Call resumeAggregator();

  /** 
   * Run grid search for Word2Vec model.
   *   @param vec_size Set size of word vectors
   *   @param window_size Set max skip length between words
   *   @param sent_sample_rate Set threshold for occurrence of words. Those that appear with higher frequency in the
   *                           training data
   *                                           will be randomly down-sampled; useful range is (0, 1e-5)
   *   @param norm_model Use Hierarchical Softmax
   *   @param epochs Number of training iterations to run
   *   @param min_word_freq This will discard words that appear less than  times
   *   @param init_learning_rate Set the starting learning rate
   *   @param word_model The word model to use (SkipGram or CBOW)
   *   @param pre_trained Id of a data frame that contains a pre-trained (external) word2vec model
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/word2vec")
  Call trainWord2vec(
    @Field("vec_size") int vec_size,
    @Field("window_size") int window_size,
    @Field("sent_sample_rate") float sent_sample_rate,
    @Field("norm_model") Word2VecNormModel norm_model,
    @Field("epochs") int epochs,
    @Field("min_word_freq") int min_word_freq,
    @Field("init_learning_rate") float init_learning_rate,
    @Field("word_model") Word2VecWordModel word_model,
    @Field("pre_trained") String pre_trained,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/word2vec")
  Call trainWord2vec();

  /** 
   * Resume grid search for Word2Vec model.
   *   @param vec_size Set size of word vectors
   *   @param window_size Set max skip length between words
   *   @param sent_sample_rate Set threshold for occurrence of words. Those that appear with higher frequency in the
   *                           training data
   *                                           will be randomly down-sampled; useful range is (0, 1e-5)
   *   @param norm_model Use Hierarchical Softmax
   *   @param epochs Number of training iterations to run
   *   @param min_word_freq This will discard words that appear less than  times
   *   @param init_learning_rate Set the starting learning rate
   *   @param word_model The word model to use (SkipGram or CBOW)
   *   @param pre_trained Id of a data frame that contains a pre-trained (external) word2vec model
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/word2vec/resume")
  Call resumeWord2vec(
    @Field("vec_size") int vec_size,
    @Field("window_size") int window_size,
    @Field("sent_sample_rate") float sent_sample_rate,
    @Field("norm_model") Word2VecNormModel norm_model,
    @Field("epochs") int epochs,
    @Field("min_word_freq") int min_word_freq,
    @Field("init_learning_rate") float init_learning_rate,
    @Field("word_model") Word2VecWordModel word_model,
    @Field("pre_trained") String pre_trained,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/word2vec/resume")
  Call resumeWord2vec();

  /** 
   * Run grid search for StackedEnsemble model.
   *   @param base_models List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to
   *                      individual models. If not using blending frame, then models must have been cross-validated
   *                      using nfolds > 1, and folds must be identical across models.
   *   @param metalearner_algorithm Type of algorithm to use as the metalearner. Options include 'AUTO' (GLM with non
   *                                negative weights; if validation_frame is present, a lambda search is performed),
   *                                'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with
   *                                default parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default
   *                                parameters), 'naivebayes' (NaiveBayes with default parameters), or 'xgboost' (if
   *                                available, XGBoost with default parameters).
   *   @param metalearner_nfolds Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable
   *                             or >= 2).
   *   @param metalearner_fold_assignment Cross-validation fold assignment scheme for metalearner cross-validation.
   *                                      Defaults to AUTO (which is currently set to Random). The 'Stratified' option
   *                                      will stratify the folds based on the response variable, for classification
   *                                      problems.
   *   @param metalearner_fold_column Column with cross-validation fold index assignment per observation for cross-
   *                                  validation of the metalearner.
   *   @param metalearner_transform Transformation used for the level one frame.
   *   @param keep_levelone_frame Keep level one frame used for metalearner training.
   *   @param metalearner_params Parameters for metalearner algorithm
   *   @param blending_frame Frame used to compute the predictions that serve as the training frame for the metalearner
   *                         (triggers blending mode if provided)
   *   @param seed Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based
   *               random number)
   *   @param score_training_samples Specify the number of training set samples for scoring. The value must be >= 0. To
   *                                 use all training samples, enter 0.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble")
  Call trainStackedensemble(
    @Field("base_models") String[] base_models,
    @Field("metalearner_algorithm") EnsembleMetalearnerAlgorithm metalearner_algorithm,
    @Field("metalearner_nfolds") int metalearner_nfolds,
    @Field("metalearner_fold_assignment") ModelParametersFoldAssignmentScheme metalearner_fold_assignment,
    @Field("metalearner_fold_column") String metalearner_fold_column,
    @Field("metalearner_transform") EnsembleStackedEnsembleModelStackedEnsembleParametersMetalearnerTransform metalearner_transform,
    @Field("keep_levelone_frame") boolean keep_levelone_frame,
    @Field("metalearner_params") String metalearner_params,
    @Field("blending_frame") String blending_frame,
    @Field("seed") long seed,
    @Field("score_training_samples") long score_training_samples,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble")
  Call trainStackedensemble(@Field("base_models") String[] base_models);

  /** 
   * Resume grid search for StackedEnsemble model.
   *   @param base_models List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to
   *                      individual models. If not using blending frame, then models must have been cross-validated
   *                      using nfolds > 1, and folds must be identical across models.
   *   @param metalearner_algorithm Type of algorithm to use as the metalearner. Options include 'AUTO' (GLM with non
   *                                negative weights; if validation_frame is present, a lambda search is performed),
   *                                'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with
   *                                default parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default
   *                                parameters), 'naivebayes' (NaiveBayes with default parameters), or 'xgboost' (if
   *                                available, XGBoost with default parameters).
   *   @param metalearner_nfolds Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable
   *                             or >= 2).
   *   @param metalearner_fold_assignment Cross-validation fold assignment scheme for metalearner cross-validation.
   *                                      Defaults to AUTO (which is currently set to Random). The 'Stratified' option
   *                                      will stratify the folds based on the response variable, for classification
   *                                      problems.
   *   @param metalearner_fold_column Column with cross-validation fold index assignment per observation for cross-
   *                                  validation of the metalearner.
   *   @param metalearner_transform Transformation used for the level one frame.
   *   @param keep_levelone_frame Keep level one frame used for metalearner training.
   *   @param metalearner_params Parameters for metalearner algorithm
   *   @param blending_frame Frame used to compute the predictions that serve as the training frame for the metalearner
   *                         (triggers blending mode if provided)
   *   @param seed Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based
   *               random number)
   *   @param score_training_samples Specify the number of training set samples for scoring. The value must be >= 0. To
   *                                 use all training samples, enter 0.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble/resume")
  Call resumeStackedensemble(
    @Field("base_models") String[] base_models,
    @Field("metalearner_algorithm") EnsembleMetalearnerAlgorithm metalearner_algorithm,
    @Field("metalearner_nfolds") int metalearner_nfolds,
    @Field("metalearner_fold_assignment") ModelParametersFoldAssignmentScheme metalearner_fold_assignment,
    @Field("metalearner_fold_column") String metalearner_fold_column,
    @Field("metalearner_transform") EnsembleStackedEnsembleModelStackedEnsembleParametersMetalearnerTransform metalearner_transform,
    @Field("keep_levelone_frame") boolean keep_levelone_frame,
    @Field("metalearner_params") String metalearner_params,
    @Field("blending_frame") String blending_frame,
    @Field("seed") long seed,
    @Field("score_training_samples") long score_training_samples,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble/resume")
  Call resumeStackedensemble(@Field("base_models") String[] base_models);

  /** 
   * Run grid search for CoxPH model.
   *   @param start_column Start Time Column.
   *   @param stop_column Stop Time Column.
   *   @param stratify_by List of columns to use for stratification.
   *   @param ties Method for Handling Ties.
   *   @param init Coefficient starting value.
   *   @param lre_min Minimum log-relative error.
   *   @param max_iterations Maximum number of iterations.
   *   @param interactions_only A list of columns that should only be used to create interactions but should not itself
   *                            participate in model training.
   *   @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                       for the list.
   *   @param interaction_pairs A list of pairwise (first order) column interactions.
   *   @param use_all_factor_levels (Internal. For development only!) Indicates whether to use all factor levels.
   *   @param single_node_mode Run on a single node to reduce the effect of network overhead (for smaller datasets)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/coxph")
  Call trainCoxph(
    @Field("start_column") String start_column,
    @Field("stop_column") String stop_column,
    @Field("stratify_by") String[] stratify_by,
    @Field("ties") CoxPHTies ties,
    @Field("init") double init,
    @Field("lre_min") double lre_min,
    @Field("max_iterations") int max_iterations,
    @Field("interactions_only") String[] interactions_only,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/coxph")
  Call trainCoxph();

  /** 
   * Resume grid search for CoxPH model.
   *   @param start_column Start Time Column.
   *   @param stop_column Stop Time Column.
   *   @param stratify_by List of columns to use for stratification.
   *   @param ties Method for Handling Ties.
   *   @param init Coefficient starting value.
   *   @param lre_min Minimum log-relative error.
   *   @param max_iterations Maximum number of iterations.
   *   @param interactions_only A list of columns that should only be used to create interactions but should not itself
   *                            participate in model training.
   *   @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                       for the list.
   *   @param interaction_pairs A list of pairwise (first order) column interactions.
   *   @param use_all_factor_levels (Internal. For development only!) Indicates whether to use all factor levels.
   *   @param single_node_mode Run on a single node to reduce the effect of network overhead (for smaller datasets)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/coxph/resume")
  Call resumeCoxph(
    @Field("start_column") String start_column,
    @Field("stop_column") String stop_column,
    @Field("stratify_by") String[] stratify_by,
    @Field("ties") CoxPHTies ties,
    @Field("init") double init,
    @Field("lre_min") double lre_min,
    @Field("max_iterations") int max_iterations,
    @Field("interactions_only") String[] interactions_only,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/coxph/resume")
  Call resumeCoxph();

  /** 
   * Run grid search for Generic model.
   *   @param path Path to file with self-contained model archive.
   *   @param model_key Key to the self-contained model archive already uploaded to H2O.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/generic")
  Call trainGeneric(
    @Field("path") String path,
    @Field("model_key") String model_key,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/generic")
  Call trainGeneric();

  /** 
   * Resume grid search for Generic model.
   *   @param path Path to file with self-contained model archive.
   *   @param model_key Key to the self-contained model archive already uploaded to H2O.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/generic/resume")
  Call resumeGeneric(
    @Field("path") String path,
    @Field("model_key") String model_key,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/generic/resume")
  Call resumeGeneric();

  /** 
   * Run grid search for GAM model.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param family Family. Use binomial for classification with logistic regression, others are for regression
   *                 problems.
   *   @param tweedie_variance_power Tweedie variance power
   *   @param tweedie_link_power Tweedie link power
   *   @param theta Theta
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param startval double array to initialize coefficients for GAM.
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
   *   @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                   set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                   otherwise it is set to 100.
   *   @param standardize Standardize numeric columns to have zero mean and unit variance
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative
   *   @param max_iterations Maximum number of iterations
   *   @param beta_epsilon Converge if  beta changes less (using L-infinity norm) than beta esilon, ONLY applies to
   *                       IRLSM solver
   *   @param objective_epsilon Converge if  objective value changes less than this. Default indicates: If lambda_search
   *                            is set to True the value of objective_epsilon is set to .0001. If the lambda_search is
   *                            set to False and lambda is equal to zero, the value of objective_epsilon is set to
   *                            .000001, for any other value of lambda the default value of objective_epsilon is set to
   *                            .0001.
   *   @param gradient_epsilon Converge if  objective changes less (using L-infinity norm) than this, ONLY applies to
   *                           L-BFGS solver. Default indicates: If lambda_search is set to False and lambda is equal to
   *                           zero, the default value of gradient_epsilon is equal to .000001, otherwise the default
   *                           value is .0001. If lambda_search is set to True, the conditional values above are 1E-8
   *                           and 1E-6 respectively.
   *   @param obj_reg Likelihood divider in objective value computation, default is 1/nobs
   *   @param link Link function.
   *   @param intercept Include constant term in the model
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param cold_start Only applicable to multiple alpha/lambda values when calling GLM from GAM.  If false, build the
   *                     next model for next set of alpha/lambda values starting from the values provided by current
   *                     model.  If true will start GLM model from scratch.
   *   @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *                           lambda that drives all coefficients to zero). Default indicates: if the number of
   *                           observations is greater than the number of variables, then lambda_min_ratio is set to
   *                           0.0001; if the number of observations is less than the number of variables, then
   *                           lambda_min_ratio is set to 0.01.
   *   @param beta_constraints Beta constraints
   *   @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *                                to prevent expensive model building with many predictors. Default indicates: If the
   *                                IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *                                is set to 100000000.
   *   @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                       for the list.
   *   @param interaction_pairs A list of pairwise (first order) column interactions.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   *   @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
   *   @param store_knot_locations If set to true, will return knot locations as double[][] array for gam column names
   *                               found knots_for_gam.  Default to false.
   *   @param num_knots Number of knots for gam predictors.  If specified, must specify one for each gam predictor.  For
   *                    monotone I-splines, mininum = 2, for cs spline, minimum = 3.  For thin plate, minimum is size of
   *                    polynomial basis + 2.
   *   @param spline_orders Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be
   *                        the same size as gam_columns.  For I-splines, the spline_orders will be the same as the
   *                        polynomials used to generate the splines.  For M-splines, the polynomials used to generate
   *                        the splines will be spline_order-1.  Values for bs=0 or 1 will be ignored.
   *   @param splines_non_negative Valid for I-spline (bs=2) only.  True if the I-splines are monotonically increasing
   *                               (and monotonically non-decreasing) and False if the I-splines are monotonically
   *                               decreasing (and monotonically non-increasing).  If specified, must be the same size
   *                               as gam_columns.  Values for other spline types will be ignored.  Default to true.
   *   @param gam_columns Arrays of predictor column names for gam for smoothers using single or multiple predictors
   *                      like {{'c1'},{'c2','c3'},{'c4'},...}
   *   @param scale Smoothing parameter for gam predictors.  If specified, must be of the same length as gam_columns
   *   @param bs Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for
   *             monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc here:
   *             https://github.com/h2oai/h2o-3/issues/6926).  If specified, must be the same size as gam_columns
   *   @param keep_gam_cols Save keys of model matrix
   *   @param standardize_tp_gam_cols standardize tp (thin plate) predictor columns
   *   @param scale_tp_penalty_mat Scale penalty matrix for tp (thin plate) smoothers as in R
   *   @param knot_ids Array storing frame keys of knots.  One for each gam column set specified in gam_columns
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gam")
  Call trainGam(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("startval") double[] startval,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("standardize") boolean standardize,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("cold_start") boolean cold_start,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("store_knot_locations") boolean store_knot_locations,
    @Field("num_knots") int[] num_knots,
    @Field("spline_orders") int[] spline_orders,
    @Field("splines_non_negative") boolean[] splines_non_negative,
    @Field("gam_columns") String[][] gam_columns,
    @Field("scale") double[] scale,
    @Field("bs") int[] bs,
    @Field("keep_gam_cols") boolean keep_gam_cols,
    @Field("standardize_tp_gam_cols") boolean standardize_tp_gam_cols,
    @Field("scale_tp_penalty_mat") boolean scale_tp_penalty_mat,
    @Field("knot_ids") String[] knot_ids,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/gam")
  Call trainGam(@Field("gam_columns") String[][] gam_columns);

  /** 
   * Resume grid search for GAM model.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param family Family. Use binomial for classification with logistic regression, others are for regression
   *                 problems.
   *   @param tweedie_variance_power Tweedie variance power
   *   @param tweedie_link_power Tweedie link power
   *   @param theta Theta
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param startval double array to initialize coefficients for GAM.
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
   *   @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                   set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                   otherwise it is set to 100.
   *   @param standardize Standardize numeric columns to have zero mean and unit variance
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative
   *   @param max_iterations Maximum number of iterations
   *   @param beta_epsilon Converge if  beta changes less (using L-infinity norm) than beta esilon, ONLY applies to
   *                       IRLSM solver
   *   @param objective_epsilon Converge if  objective value changes less than this. Default indicates: If lambda_search
   *                            is set to True the value of objective_epsilon is set to .0001. If the lambda_search is
   *                            set to False and lambda is equal to zero, the value of objective_epsilon is set to
   *                            .000001, for any other value of lambda the default value of objective_epsilon is set to
   *                            .0001.
   *   @param gradient_epsilon Converge if  objective changes less (using L-infinity norm) than this, ONLY applies to
   *                           L-BFGS solver. Default indicates: If lambda_search is set to False and lambda is equal to
   *                           zero, the default value of gradient_epsilon is equal to .000001, otherwise the default
   *                           value is .0001. If lambda_search is set to True, the conditional values above are 1E-8
   *                           and 1E-6 respectively.
   *   @param obj_reg Likelihood divider in objective value computation, default is 1/nobs
   *   @param link Link function.
   *   @param intercept Include constant term in the model
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param cold_start Only applicable to multiple alpha/lambda values when calling GLM from GAM.  If false, build the
   *                     next model for next set of alpha/lambda values starting from the values provided by current
   *                     model.  If true will start GLM model from scratch.
   *   @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *                           lambda that drives all coefficients to zero). Default indicates: if the number of
   *                           observations is greater than the number of variables, then lambda_min_ratio is set to
   *                           0.0001; if the number of observations is less than the number of variables, then
   *                           lambda_min_ratio is set to 0.01.
   *   @param beta_constraints Beta constraints
   *   @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *                                to prevent expensive model building with many predictors. Default indicates: If the
   *                                IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *                                is set to 100000000.
   *   @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                       for the list.
   *   @param interaction_pairs A list of pairwise (first order) column interactions.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   *   @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
   *   @param store_knot_locations If set to true, will return knot locations as double[][] array for gam column names
   *                               found knots_for_gam.  Default to false.
   *   @param num_knots Number of knots for gam predictors.  If specified, must specify one for each gam predictor.  For
   *                    monotone I-splines, mininum = 2, for cs spline, minimum = 3.  For thin plate, minimum is size of
   *                    polynomial basis + 2.
   *   @param spline_orders Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be
   *                        the same size as gam_columns.  For I-splines, the spline_orders will be the same as the
   *                        polynomials used to generate the splines.  For M-splines, the polynomials used to generate
   *                        the splines will be spline_order-1.  Values for bs=0 or 1 will be ignored.
   *   @param splines_non_negative Valid for I-spline (bs=2) only.  True if the I-splines are monotonically increasing
   *                               (and monotonically non-decreasing) and False if the I-splines are monotonically
   *                               decreasing (and monotonically non-increasing).  If specified, must be the same size
   *                               as gam_columns.  Values for other spline types will be ignored.  Default to true.
   *   @param gam_columns Arrays of predictor column names for gam for smoothers using single or multiple predictors
   *                      like {{'c1'},{'c2','c3'},{'c4'},...}
   *   @param scale Smoothing parameter for gam predictors.  If specified, must be of the same length as gam_columns
   *   @param bs Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for
   *             monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc here:
   *             https://github.com/h2oai/h2o-3/issues/6926).  If specified, must be the same size as gam_columns
   *   @param keep_gam_cols Save keys of model matrix
   *   @param standardize_tp_gam_cols standardize tp (thin plate) predictor columns
   *   @param scale_tp_penalty_mat Scale penalty matrix for tp (thin plate) smoothers as in R
   *   @param knot_ids Array storing frame keys of knots.  One for each gam column set specified in gam_columns
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gam/resume")
  Call resumeGam(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("startval") double[] startval,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("standardize") boolean standardize,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("cold_start") boolean cold_start,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("store_knot_locations") boolean store_knot_locations,
    @Field("num_knots") int[] num_knots,
    @Field("spline_orders") int[] spline_orders,
    @Field("splines_non_negative") boolean[] splines_non_negative,
    @Field("gam_columns") String[][] gam_columns,
    @Field("scale") double[] scale,
    @Field("bs") int[] bs,
    @Field("keep_gam_cols") boolean keep_gam_cols,
    @Field("standardize_tp_gam_cols") boolean standardize_tp_gam_cols,
    @Field("scale_tp_penalty_mat") boolean scale_tp_penalty_mat,
    @Field("knot_ids") String[] knot_ids,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/gam/resume")
  Call resumeGam(@Field("gam_columns") String[][] gam_columns);

  /** 
   * Run grid search for ANOVAGLM model.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param standardize Standardize numeric columns to have zero mean and unit variance
   *   @param family Family. Use binomial for classification with logistic regression, others are for regression
   *                 problems.
   *   @param tweedie_variance_power Tweedie variance power
   *   @param tweedie_link_power Tweedie link power
   *   @param theta Theta
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   *   @param max_iterations Maximum number of iterations
   *   @param link Link function.
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param highest_interaction_term Limit the number of interaction terms, if 2 means interaction between 2 columns
   *                                   only, 3 for three columns and so on...  Default to 2.
   *   @param type Refer to the SS type 1, 2, 3, or 4.  We are currently only supporting 3
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   *   @param save_transformed_framekeys true to save the keys of transformed predictors and interaction column.
   *   @param nparallelism Number of models to build in parallel.  Default to 4.  Adjust according to your system.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/anovaglm")
  Call trainAnovaglm(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("solver") GLMSolver solver,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("max_iterations") int max_iterations,
    @Field("link") GLMLink link,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("highest_interaction_term") int highest_interaction_term,
    @Field("type") int type,
    @Field("early_stopping") boolean early_stopping,
    @Field("save_transformed_framekeys") boolean save_transformed_framekeys,
    @Field("nparallelism") int nparallelism,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/anovaglm")
  Call trainAnovaglm();

  /** 
   * Resume grid search for ANOVAGLM model.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param standardize Standardize numeric columns to have zero mean and unit variance
   *   @param family Family. Use binomial for classification with logistic regression, others are for regression
   *                 problems.
   *   @param tweedie_variance_power Tweedie variance power
   *   @param tweedie_link_power Tweedie link power
   *   @param theta Theta
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   *   @param max_iterations Maximum number of iterations
   *   @param link Link function.
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param highest_interaction_term Limit the number of interaction terms, if 2 means interaction between 2 columns
   *                                   only, 3 for three columns and so on...  Default to 2.
   *   @param type Refer to the SS type 1, 2, 3, or 4.  We are currently only supporting 3
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   *   @param save_transformed_framekeys true to save the keys of transformed predictors and interaction column.
   *   @param nparallelism Number of models to build in parallel.  Default to 4.  Adjust according to your system.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/anovaglm/resume")
  Call resumeAnovaglm(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("solver") GLMSolver solver,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("max_iterations") int max_iterations,
    @Field("link") GLMLink link,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("highest_interaction_term") int highest_interaction_term,
    @Field("type") int type,
    @Field("early_stopping") boolean early_stopping,
    @Field("save_transformed_framekeys") boolean save_transformed_framekeys,
    @Field("nparallelism") int nparallelism,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/anovaglm/resume")
  Call resumeAnovaglm();

  /** 
   * Run grid search for PSVM model.
   *   @param hyper_param Penalty parameter C of the error term
   *   @param kernel_type Type of used kernel
   *   @param gamma Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)
   *   @param rank_ratio Desired rank of the ICF matrix expressed as an ration of number of input rows (-1 means use
   *                     sqrt(#rows)).
   *   @param positive_weight Weight of positive (+1) class of observations
   *   @param negative_weight Weight of positive (-1) class of observations
   *   @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   *   @param sv_threshold Threshold for accepting a candidate observation into the set of support vectors
   *   @param max_iterations Maximum number of iteration of the algorithm
   *   @param fact_threshold Convergence threshold of the Incomplete Cholesky Factorization (ICF)
   *   @param feasible_threshold Convergence threshold for primal-dual residuals in the IPM iteration
   *   @param surrogate_gap_threshold Feasibility criterion of the surrogate duality gap (eta)
   *   @param mu_factor Increasing factor mu
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/psvm")
  Call trainPsvm(
    @Field("hyper_param") double hyper_param,
    @Field("kernel_type") GenmodelalgospsvmKernelType kernel_type,
    @Field("gamma") double gamma,
    @Field("rank_ratio") double rank_ratio,
    @Field("positive_weight") double positive_weight,
    @Field("negative_weight") double negative_weight,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("sv_threshold") double sv_threshold,
    @Field("max_iterations") int max_iterations,
    @Field("fact_threshold") double fact_threshold,
    @Field("feasible_threshold") double feasible_threshold,
    @Field("surrogate_gap_threshold") double surrogate_gap_threshold,
    @Field("mu_factor") double mu_factor,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/psvm")
  Call trainPsvm();

  /** 
   * Resume grid search for PSVM model.
   *   @param hyper_param Penalty parameter C of the error term
   *   @param kernel_type Type of used kernel
   *   @param gamma Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)
   *   @param rank_ratio Desired rank of the ICF matrix expressed as an ration of number of input rows (-1 means use
   *                     sqrt(#rows)).
   *   @param positive_weight Weight of positive (+1) class of observations
   *   @param negative_weight Weight of positive (-1) class of observations
   *   @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   *   @param sv_threshold Threshold for accepting a candidate observation into the set of support vectors
   *   @param max_iterations Maximum number of iteration of the algorithm
   *   @param fact_threshold Convergence threshold of the Incomplete Cholesky Factorization (ICF)
   *   @param feasible_threshold Convergence threshold for primal-dual residuals in the IPM iteration
   *   @param surrogate_gap_threshold Feasibility criterion of the surrogate duality gap (eta)
   *   @param mu_factor Increasing factor mu
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/psvm/resume")
  Call resumePsvm(
    @Field("hyper_param") double hyper_param,
    @Field("kernel_type") GenmodelalgospsvmKernelType kernel_type,
    @Field("gamma") double gamma,
    @Field("rank_ratio") double rank_ratio,
    @Field("positive_weight") double positive_weight,
    @Field("negative_weight") double negative_weight,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("sv_threshold") double sv_threshold,
    @Field("max_iterations") int max_iterations,
    @Field("fact_threshold") double fact_threshold,
    @Field("feasible_threshold") double feasible_threshold,
    @Field("surrogate_gap_threshold") double surrogate_gap_threshold,
    @Field("mu_factor") double mu_factor,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/psvm/resume")
  Call resumePsvm();

  /** 
   * Run grid search for RuleFit model.
   *   @param seed Seed for pseudo random number generator (if applicable).
   *   @param algorithm The algorithm to use to generate rules.
   *   @param min_rule_length Minimum length of rules. Defaults to 3.
   *   @param max_rule_length Maximum length of rules. Defaults to 3.
   *   @param max_num_rules The maximum number of rules to return. defaults to -1 which means the number of rules is
   *                        selected
   *                        by diminishing returns in model deviance.
   *   @param model_type Specifies type of base learners in the ensemble.
   *   @param rule_generation_ntrees Specifies the number of trees to build in the tree model. Defaults to 50.
   *   @param remove_duplicates Whether to remove rules which are identical to an earlier rule. Defaults to true.
   *   @param lambda Lambda for LASSO regressor.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/rulefit")
  Call trainRulefit(
    @Field("seed") long seed,
    @Field("algorithm") RuleFitModelAlgorithm algorithm,
    @Field("min_rule_length") int min_rule_length,
    @Field("max_rule_length") int max_rule_length,
    @Field("max_num_rules") int max_num_rules,
    @Field("model_type") RuleFitModelModelType model_type,
    @Field("rule_generation_ntrees") int rule_generation_ntrees,
    @Field("remove_duplicates") boolean remove_duplicates,
    @Field("lambda") double[] lambda,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/rulefit")
  Call trainRulefit();

  /** 
   * Resume grid search for RuleFit model.
   *   @param seed Seed for pseudo random number generator (if applicable).
   *   @param algorithm The algorithm to use to generate rules.
   *   @param min_rule_length Minimum length of rules. Defaults to 3.
   *   @param max_rule_length Maximum length of rules. Defaults to 3.
   *   @param max_num_rules The maximum number of rules to return. defaults to -1 which means the number of rules is
   *                        selected
   *                        by diminishing returns in model deviance.
   *   @param model_type Specifies type of base learners in the ensemble.
   *   @param rule_generation_ntrees Specifies the number of trees to build in the tree model. Defaults to 50.
   *   @param remove_duplicates Whether to remove rules which are identical to an earlier rule. Defaults to true.
   *   @param lambda Lambda for LASSO regressor.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/rulefit/resume")
  Call resumeRulefit(
    @Field("seed") long seed,
    @Field("algorithm") RuleFitModelAlgorithm algorithm,
    @Field("min_rule_length") int min_rule_length,
    @Field("max_rule_length") int max_rule_length,
    @Field("max_num_rules") int max_num_rules,
    @Field("model_type") RuleFitModelModelType model_type,
    @Field("rule_generation_ntrees") int rule_generation_ntrees,
    @Field("remove_duplicates") boolean remove_duplicates,
    @Field("lambda") double[] lambda,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/rulefit/resume")
  Call resumeRulefit();

  /** 
   * Run grid search for UpliftDRF model.
   *   @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *                 for classification and p/3 for regression (where p is the # of predictors
   *   @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   *   @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
   *                           tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
   *                           groups.
   *   @param uplift_metric Divergence metric used to find best split when building an uplift tree.
   *   @param auuc_type Metric used to calculate Area Under Uplift Curve.
   *   @param auuc_nbins Number of bins to calculate Area Under Uplift Curve.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/upliftdrf")
  Call trainUpliftdrf(
    @Field("mtries") int mtries,
    @Field("sample_rate") double sample_rate,
    @Field("treatment_column") String treatment_column,
    @Field("uplift_metric") TreeupliftUpliftDRFModelUpliftDRFParametersUpliftMetricType uplift_metric,
    @Field("auuc_type") AUUCType auuc_type,
    @Field("auuc_nbins") int auuc_nbins,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/upliftdrf")
  Call trainUpliftdrf(@Field("treatment_column") String treatment_column);

  /** 
   * Resume grid search for UpliftDRF model.
   *   @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *                 for classification and p/3 for regression (where p is the # of predictors
   *   @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   *   @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
   *                           tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
   *                           groups.
   *   @param uplift_metric Divergence metric used to find best split when building an uplift tree.
   *   @param auuc_type Metric used to calculate Area Under Uplift Curve.
   *   @param auuc_nbins Number of bins to calculate Area Under Uplift Curve.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param ntrees Number of trees.
   *   @param max_depth Maximum tree depth (0 for unlimited).
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *                best point
   *   @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                          root level, then decrease by factor of two per level
   *   @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                     point. Higher values can lead to more overfitting.
   *   @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                      stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                      trees when the R^2 metric equals or exceeds this
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                              datasets.
   *   @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                                1.0), for each tree
   *   @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   *   @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                           0.0 and <= 2.0)
   *   @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   *   @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   *   @param histogram_type What type of histogram to use for finding optimal split points
   *   @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                          probabilities. Calibration can provide more accurate estimates of class probabilities.
   *   @param calibration_frame Data for model calibration
   *   @param calibration_method Calibration method to use
   *   @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                  the response column is a constant value.If disabled, then model will train
   *                                  regardless of the response column being a constant value or not.
   *   @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                      running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                      training.
   *   @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                                only when in_training_checkpoints_dir is defined
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/upliftdrf/resume")
  Call resumeUpliftdrf(
    @Field("mtries") int mtries,
    @Field("sample_rate") double sample_rate,
    @Field("treatment_column") String treatment_column,
    @Field("uplift_metric") TreeupliftUpliftDRFModelUpliftDRFParametersUpliftMetricType uplift_metric,
    @Field("auuc_type") AUUCType auuc_type,
    @Field("auuc_nbins") int auuc_nbins,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/upliftdrf/resume")
  Call resumeUpliftdrf(@Field("treatment_column") String treatment_column);

  /** 
   * Run grid search for ModelSelection model.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param family Family. For maxr/maxrsweep, only gaussian.  For backward, ordinal and multinomial families are not
   *                 supported
   *   @param tweedie_variance_power Tweedie variance power
   *   @param tweedie_link_power Tweedie link power
   *   @param theta Theta
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   *   @param multinode_mode For maxrsweep only.  If enabled, will attempt to perform sweeping action using multiple
   *                         nodes in the cluster.  Defaults to false.
   *   @param build_glm_model For maxrsweep mode only.  If true, will return full blown GLM models with the desired
   *                          predictorsubsets.  If false, only the predictor subsets, predictor coefficients are
   *                          returned.  This is forspeeding up the model selection process.  The users can choose to
   *                          build the GLM models themselvesby using the predictor subsets themselves.  Defaults to
   *                          false.
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
   *   @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                   set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                   otherwise it is set to 100.
   *   @param score_iteration_interval Perform scoring for every score_iteration_interval iterations
   *   @param standardize Standardize numeric columns to have zero mean and unit variance
   *   @param cold_start Only applicable to multiple alpha/lambda values.  If false, build the next model for next set
   *                     of alpha/lambda values starting from the values provided by current model.  If true will start
   *                     GLM model from scratch.
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative
   *   @param max_iterations Maximum number of iterations
   *   @param beta_epsilon Converge if  beta changes less (using L-infinity norm) than beta esilon, ONLY applies to
   *                       IRLSM solver
   *   @param objective_epsilon Converge if  objective value changes less than this. Default (of -1.0) indicates: If
   *                            lambda_search is set to True the value of objective_epsilon is set to .0001. If the
   *                            lambda_search is set to False and lambda is equal to zero, the value of
   *                            objective_epsilon is set to .000001, for any other value of lambda the default value of
   *                            objective_epsilon is set to .0001.
   *   @param gradient_epsilon Converge if  objective changes less (using L-infinity norm) than this, ONLY applies to
   *                           L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
   *                           is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
   *                           the default value is .0001. If lambda_search is set to True, the conditional values above
   *                           are 1E-8 and 1E-6 respectively.
   *   @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs
   *   @param link Link function.
   *   @param startval double array to initialize fixed and random coefficients for HGLM, coefficients for GLM.
   *   @param calc_like if true, will return likelihood function value for HGLM.
   *   @param mode Mode: Used to choose model selection algorithms to use.  Options include 'allsubsets' for all
   *               subsets, 'maxr' that uses sequential replacement and GLM to build all models, slow but works with
   *               cross-validation, validation frames for more robust results, 'maxrsweep' that uses sequential
   *               replacement and sweeping action, much faster than 'maxr', 'backward' for backward selection.
   *   @param intercept Include constant term in the model
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *                           lambda that drives all coefficients to zero). Default indicates: if the number of
   *                           observations is greater than the number of variables, then lambda_min_ratio is set to
   *                           0.0001; if the number of observations is less than the number of variables, then
   *                           lambda_min_ratio is set to 0.01.
   *   @param beta_constraints Beta constraints
   *   @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *                                to prevent expensive model building with many predictors. Default indicates: If the
   *                                IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *                                is set to 100000000.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   *   @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
   *   @param max_predictor_number Maximum number of predictors to be considered when building GLM models.  Defaults to
   *                               1.
   *   @param min_predictor_number For mode = 'backward' only.  Minimum number of predictors to be considered when
   *                               building GLM models starting with all predictors to be included.  Defaults to 1.
   *   @param nparallelism number of models to build in parallel.  Defaults to 0.0 which is adaptive to the system
   *                       capability
   *   @param p_values_threshold For mode='backward' only.  If specified, will stop the model building process when all
   *                             coefficientsp-values drop below this threshold
   *   @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
   *                    in the dataset.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/modelselection")
  Call trainModelselection(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("multinode_mode") boolean multinode_mode,
    @Field("build_glm_model") boolean build_glm_model,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("mode") ModelSelectionMode mode,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("max_predictor_number") int max_predictor_number,
    @Field("min_predictor_number") int min_predictor_number,
    @Field("nparallelism") int nparallelism,
    @Field("p_values_threshold") double p_values_threshold,
    @Field("influence") GLMInfluence influence,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/modelselection")
  Call trainModelselection();

  /** 
   * Resume grid search for ModelSelection model.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param family Family. For maxr/maxrsweep, only gaussian.  For backward, ordinal and multinomial families are not
   *                 supported
   *   @param tweedie_variance_power Tweedie variance power
   *   @param tweedie_link_power Tweedie link power
   *   @param theta Theta
   *   @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems
   *                 with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *                 datasets with many columns.
   *   @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *                alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *                specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *                0.5 otherwise.
   *   @param lambda Regularization strength
   *   @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   *   @param multinode_mode For maxrsweep only.  If enabled, will attempt to perform sweeping action using multiple
   *                         nodes in the cluster.  Defaults to false.
   *   @param build_glm_model For maxrsweep mode only.  If true, will return full blown GLM models with the desired
   *                          predictorsubsets.  If false, only the predictor subsets, predictor coefficients are
   *                          returned.  This is forspeeding up the model selection process.  The users can choose to
   *                          build the GLM models themselvesby using the predictor subsets themselves.  Defaults to
   *                          false.
   *   @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
   *   @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                   set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                   otherwise it is set to 100.
   *   @param score_iteration_interval Perform scoring for every score_iteration_interval iterations
   *   @param standardize Standardize numeric columns to have zero mean and unit variance
   *   @param cold_start Only applicable to multiple alpha/lambda values.  If false, build the next model for next set
   *                     of alpha/lambda values starting from the values provided by current model.  If true will start
   *                     GLM model from scratch.
   *   @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   *   @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                      of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   *   @param non_negative Restrict coefficients (not intercept) to be non-negative
   *   @param max_iterations Maximum number of iterations
   *   @param beta_epsilon Converge if  beta changes less (using L-infinity norm) than beta esilon, ONLY applies to
   *                       IRLSM solver
   *   @param objective_epsilon Converge if  objective value changes less than this. Default (of -1.0) indicates: If
   *                            lambda_search is set to True the value of objective_epsilon is set to .0001. If the
   *                            lambda_search is set to False and lambda is equal to zero, the value of
   *                            objective_epsilon is set to .000001, for any other value of lambda the default value of
   *                            objective_epsilon is set to .0001.
   *   @param gradient_epsilon Converge if  objective changes less (using L-infinity norm) than this, ONLY applies to
   *                           L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
   *                           is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
   *                           the default value is .0001. If lambda_search is set to True, the conditional values above
   *                           are 1E-8 and 1E-6 respectively.
   *   @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs
   *   @param link Link function.
   *   @param startval double array to initialize fixed and random coefficients for HGLM, coefficients for GLM.
   *   @param calc_like if true, will return likelihood function value for HGLM.
   *   @param mode Mode: Used to choose model selection algorithms to use.  Options include 'allsubsets' for all
   *               subsets, 'maxr' that uses sequential replacement and GLM to build all models, slow but works with
   *               cross-validation, validation frames for more robust results, 'maxrsweep' that uses sequential
   *               replacement and sweeping action, much faster than 'maxr', 'backward' for backward selection.
   *   @param intercept Include constant term in the model
   *   @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *                and the mean of response does not reflect reality.
   *   @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *                           lambda that drives all coefficients to zero). Default indicates: if the number of
   *                           observations is greater than the number of variables, then lambda_min_ratio is set to
   *                           0.0001; if the number of observations is less than the number of variables, then
   *                           lambda_min_ratio is set to 0.01.
   *   @param beta_constraints Beta constraints
   *   @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *                                to prevent expensive model building with many predictors. Default indicates: If the
   *                                IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *                                is set to 100000000.
   *   @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   *   @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                                 specified, sampling factors will be automatically computed to obtain class balance
   *                                 during training. Requires balance_classes.
   *   @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                                 less than 1.0). Requires balance_classes.
   *   @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                    the Logs
   *   @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   *   @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
   *   @param max_predictor_number Maximum number of predictors to be considered when building GLM models.  Defaults to
   *                               1.
   *   @param min_predictor_number For mode = 'backward' only.  Minimum number of predictors to be considered when
   *                               building GLM models starting with all predictors to be included.  Defaults to 1.
   *   @param nparallelism number of models to build in parallel.  Defaults to 0.0 which is adaptive to the system
   *                       capability
   *   @param p_values_threshold For mode='backward' only.  If specified, will stop the model building process when all
   *                             coefficientsp-values drop below this threshold
   *   @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
   *                    in the dataset.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/modelselection/resume")
  Call resumeModelselection(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("multinode_mode") boolean multinode_mode,
    @Field("build_glm_model") boolean build_glm_model,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("mode") ModelSelectionMode mode,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("max_predictor_number") int max_predictor_number,
    @Field("min_predictor_number") int min_predictor_number,
    @Field("nparallelism") int nparallelism,
    @Field("p_values_threshold") double p_values_threshold,
    @Field("influence") GLMInfluence influence,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/modelselection/resume")
  Call resumeModelselection();

  /** 
   * Run grid search for IsotonicRegression model.
   *   @param out_of_bounds Method of handling values of X predictor that are outside of the bounds seen in training.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression")
  Call trainIsotonicregression(
    @Field("out_of_bounds") IsotonicRegressionModelOutOfBoundsHandling out_of_bounds,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression")
  Call trainIsotonicregression();

  /** 
   * Resume grid search for IsotonicRegression model.
   *   @param out_of_bounds Method of handling values of X predictor that are outside of the bounds seen in training.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression/resume")
  Call resumeIsotonicregression(
    @Field("out_of_bounds") IsotonicRegressionModelOutOfBoundsHandling out_of_bounds,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression/resume")
  Call resumeIsotonicregression();

  /** 
   * Run grid search for DT model.
   *   @param seed Seed for random numbers (affects sampling)
   *   @param max_depth Max depth of tree.
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/dt")
  Call trainDt(
    @Field("seed") long seed,
    @Field("max_depth") int max_depth,
    @Field("min_rows") int min_rows,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/dt")
  Call trainDt();

  /** 
   * Resume grid search for DT model.
   *   @param seed Seed for random numbers (affects sampling)
   *   @param max_depth Max depth of tree.
   *   @param min_rows Fewest allowed (weighted) observations in a leaf.
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/dt/resume")
  Call resumeDt(
    @Field("seed") long seed,
    @Field("max_depth") int max_depth,
    @Field("min_rows") int min_rows,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/dt/resume")
  Call resumeDt();

  /** 
   * Run grid search for AdaBoost model.
   *   @param nlearners Number of AdaBoost weak learners.
   *   @param weak_learner Choose a weak learner type. Defaults to AUTO, which means DRF.
   *   @param learn_rate Learning rate (from 0.0 to 1.0)
   *   @param weak_learner_params Customized parameters for the weak_learner algorithm.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/adaboost")
  Call trainAdaboost(
    @Field("nlearners") int nlearners,
    @Field("weak_learner") AdaBoostModelAlgorithm weak_learner,
    @Field("learn_rate") double learn_rate,
    @Field("weak_learner_params") String weak_learner_params,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/adaboost")
  Call trainAdaboost();

  /** 
   * Resume grid search for AdaBoost model.
   *   @param nlearners Number of AdaBoost weak learners.
   *   @param weak_learner Choose a weak learner type. Defaults to AUTO, which means DRF.
   *   @param learn_rate Learning rate (from 0.0 to 1.0)
   *   @param weak_learner_params Customized parameters for the weak_learner algorithm.
   *   @param seed Seed for pseudo random number generator (if applicable)
   *   @param model_id Destination id for this model; auto-generated if not specified.
   *   @param training_frame Id of the training data frame.
   *   @param validation_frame Id of the validation data frame.
   *   @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   *   @param keep_cross_validation_models Whether to keep the cross-validation models.
   *   @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   *   @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   *   @param parallelize_cross_validation Allow parallel training of cross-validation models
   *   @param distribution Distribution function
   *   @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   *   @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   *   @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                      between 0 and 1).
   *   @param response_column Response variable column.
   *   @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                         excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                         to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                         observation weights and do not increase the size of the data frame. This is typically the
   *                         number of times a row is repeated, but non-integer values are supported as well. During
   *                         training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                         If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                         is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   *   @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                        function.
   *   @param fold_column Column with cross-validation fold index assignment per observation.
   *   @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                          option will stratify the folds based on the response variable, for classification
   *                          problems.
   *   @param categorical_encoding Encoding scheme for categorical features
   *   @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                                 for model training. Only used for categorical_encoding == EnumLimited.
   *   @param ignored_columns Names of columns to ignore for training.
   *   @param ignore_const_cols Ignore constant columns.
   *   @param score_each_iteration Whether to score during each iteration of model training.
   *   @param checkpoint Model checkpoint to resume training with.
   *   @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                          length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                          to disable)
   *   @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   *   @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                          regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                          can only be used in GBM and DRF with the Python client.
   *   @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                             not at least this much)
   *   @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
   *                         binning.
   *   @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   *   @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   *   @param export_checkpoints_dir Automatically export generated models to this directory.
   *   @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/adaboost/resume")
  Call resumeAdaboost(
    @Field("nlearners") int nlearners,
    @Field("weak_learner") AdaBoostModelAlgorithm weak_learner,
    @Field("learn_rate") double learn_rate,
    @Field("weak_learner_params") String weak_learner_params,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  @FormUrlEncoded
  @POST("/99/Grid/adaboost/resume")
  Call resumeAdaboost();

  /** 
   * Import previously saved grid model
   *   @param grid_path Full path to the file containing saved Grid
   *   @param load_params_references If true will also load saved objects referenced by params. Will fail with an error
   *                                 if grid was saved without objects referenced by params.
   */
  @FormUrlEncoded
  @POST("/3/Grid.bin/import")
  Call importGrid(
    @Field("grid_path") String grid_path,
    @Field("load_params_references") boolean load_params_references
  );

  @FormUrlEncoded
  @POST("/3/Grid.bin/import")
  Call importGrid(@Field("grid_path") String grid_path);

  /** 
   * Export a Grid and its models.
   *   @param grid_id ID of the Grid to load from the directory
   *   @param grid_directory Path to the directory with saved Grid search
   *   @param save_params_references True if objects referenced by params should also be saved.
   *   @param export_cross_validation_predictions Flag indicating whether the exported model artifacts should also
   *                                              include CV Holdout Frame predictions
   */
  @FormUrlEncoded
  @POST("/3/Grid.bin/{grid_id}/export")
  Call exportGrid(
    @Path("grid_id") String grid_id,
    @Field("grid_directory") String grid_directory,
    @Field("save_params_references") boolean save_params_references,
    @Field("export_cross_validation_predictions") boolean export_cross_validation_predictions
  );

  @FormUrlEncoded
  @POST("/3/Grid.bin/{grid_id}/export")
  Call exportGrid(
    @Path("grid_id") String grid_id,
    @Field("grid_directory") String grid_directory
  );


  @SuppressWarnings("unused")
  class Helper {
    /**
     * Run grid search for XGBoost model.
     */
    public static Call trainXgboost(Grid z, XGBoostParametersV3 p) {
      return z.trainXgboost(
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.minChildWeight,
        p.learnRate,
        p.eta,
        p.sampleRate,
        p.subsample,
        p.colSampleRate,
        p.colsampleBylevel,
        p.colSampleRatePerTree,
        p.colsampleBytree,
        p.colsampleBynode,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.maxDeltaStep,
        p.scoreTreeInterval,
        p.seed,
        p.minSplitImprovement,
        p.gamma,
        p.nthread,
        p.buildTreeOneNode,
        p.saveMatrixDirectory,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.maxBins,
        p.maxLeaves,
        p.treeMethod,
        p.growPolicy,
        p.booster,
        p.regLambda,
        p.regAlpha,
        p.quietMode,
        p.sampleType,
        p.normalizeType,
        p.rateDrop,
        p.oneDrop,
        p.skipDrop,
        p.dmatrixType,
        p.backend,
        p.gpuId,
        p.interactionConstraints,
        p.scalePosWeight,
        p.evalMetric,
        p.scoreEvalMetricOnly,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for XGBoost model.
     */
    public static Call resumeXgboost(Grid z, XGBoostParametersV3 p) {
      return z.resumeXgboost(
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.minChildWeight,
        p.learnRate,
        p.eta,
        p.sampleRate,
        p.subsample,
        p.colSampleRate,
        p.colsampleBylevel,
        p.colSampleRatePerTree,
        p.colsampleBytree,
        p.colsampleBynode,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.maxDeltaStep,
        p.scoreTreeInterval,
        p.seed,
        p.minSplitImprovement,
        p.gamma,
        p.nthread,
        p.buildTreeOneNode,
        p.saveMatrixDirectory,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.maxBins,
        p.maxLeaves,
        p.treeMethod,
        p.growPolicy,
        p.booster,
        p.regLambda,
        p.regAlpha,
        p.quietMode,
        p.sampleType,
        p.normalizeType,
        p.rateDrop,
        p.oneDrop,
        p.skipDrop,
        p.dmatrixType,
        p.backend,
        p.gpuId,
        p.interactionConstraints,
        p.scalePosWeight,
        p.evalMetric,
        p.scoreEvalMetricOnly,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for Infogram model.
     */
    public static Call trainInfogram(Grid z, InfogramParametersV3 p) {
      return z.trainInfogram(
        p.seed,
        p.standardize,
        (p.plugValues == null? null : p.plugValues.name),
        p.maxIterations,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.algorithm,
        p.algorithmParams,
        p.protectedColumns,
        p.totalInformationThreshold,
        p.netInformationThreshold,
        p.relevanceIndexThreshold,
        p.safetyIndexThreshold,
        p.dataFraction,
        p.topNFeatures,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for Infogram model.
     */
    public static Call resumeInfogram(Grid z, InfogramParametersV3 p) {
      return z.resumeInfogram(
        p.seed,
        p.standardize,
        (p.plugValues == null? null : p.plugValues.name),
        p.maxIterations,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.algorithm,
        p.algorithmParams,
        p.protectedColumns,
        p.totalInformationThreshold,
        p.netInformationThreshold,
        p.relevanceIndexThreshold,
        p.safetyIndexThreshold,
        p.dataFraction,
        p.topNFeatures,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for TargetEncoder model.
     */
    public static Call trainTargetencoder(Grid z, TargetEncoderParametersV3 p) {
      return z.trainTargetencoder(
        p.columnsToEncode,
        p.keepOriginalCategoricalColumns,
        p.blending,
        p.inflectionPoint,
        p.smoothing,
        p.dataLeakageHandling,
        p.noise,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for TargetEncoder model.
     */
    public static Call resumeTargetencoder(Grid z, TargetEncoderParametersV3 p) {
      return z.resumeTargetencoder(
        p.columnsToEncode,
        p.keepOriginalCategoricalColumns,
        p.blending,
        p.inflectionPoint,
        p.smoothing,
        p.dataLeakageHandling,
        p.noise,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for DeepLearning model.
     */
    public static Call trainDeeplearning(Grid z, DeepLearningParametersV3 p) {
      return z.trainDeeplearning(
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.activation,
        p.hidden,
        p.epochs,
        p.trainSamplesPerIteration,
        p.targetRatioCommToComp,
        p.seed,
        p.adaptiveRate,
        p.rho,
        p.epsilon,
        p.rate,
        p.rateAnnealing,
        p.rateDecay,
        p.momentumStart,
        p.momentumRamp,
        p.momentumStable,
        p.nesterovAcceleratedGradient,
        p.inputDropoutRatio,
        p.hiddenDropoutRatios,
        p.l1,
        p.l2,
        p.maxW2,
        p.initialWeightDistribution,
        p.initialWeightScale,
        (p.initialWeights == null? null : keyArrayToStringArray(p.initialWeights)),
        (p.initialBiases == null? null : keyArrayToStringArray(p.initialBiases)),
        p.loss,
        p.scoreInterval,
        p.scoreTrainingSamples,
        p.scoreValidationSamples,
        p.scoreDutyCycle,
        p.classificationStop,
        p.regressionStop,
        p.quietMode,
        p.scoreValidationSampling,
        p.overwriteWithBestModel,
        p.autoencoder,
        p.useAllFactorLevels,
        p.standardize,
        p.diagnostics,
        p.variableImportances,
        p.fastMode,
        p.forceLoadBalance,
        p.replicateTrainingData,
        p.singleNodeMode,
        p.shuffleTrainingData,
        p.missingValuesHandling,
        p.sparse,
        p.colMajor,
        p.averageActivation,
        p.sparsityBeta,
        p.maxCategoricalFeatures,
        p.reproducible,
        p.exportWeightsAndBiases,
        p.miniBatchSize,
        p.elasticAveraging,
        p.elasticAveragingMovingRate,
        p.elasticAveragingRegularization,
        (p.pretrainedAutoencoder == null? null : p.pretrainedAutoencoder.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for DeepLearning model.
     */
    public static Call resumeDeeplearning(Grid z, DeepLearningParametersV3 p) {
      return z.resumeDeeplearning(
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.activation,
        p.hidden,
        p.epochs,
        p.trainSamplesPerIteration,
        p.targetRatioCommToComp,
        p.seed,
        p.adaptiveRate,
        p.rho,
        p.epsilon,
        p.rate,
        p.rateAnnealing,
        p.rateDecay,
        p.momentumStart,
        p.momentumRamp,
        p.momentumStable,
        p.nesterovAcceleratedGradient,
        p.inputDropoutRatio,
        p.hiddenDropoutRatios,
        p.l1,
        p.l2,
        p.maxW2,
        p.initialWeightDistribution,
        p.initialWeightScale,
        (p.initialWeights == null? null : keyArrayToStringArray(p.initialWeights)),
        (p.initialBiases == null? null : keyArrayToStringArray(p.initialBiases)),
        p.loss,
        p.scoreInterval,
        p.scoreTrainingSamples,
        p.scoreValidationSamples,
        p.scoreDutyCycle,
        p.classificationStop,
        p.regressionStop,
        p.quietMode,
        p.scoreValidationSampling,
        p.overwriteWithBestModel,
        p.autoencoder,
        p.useAllFactorLevels,
        p.standardize,
        p.diagnostics,
        p.variableImportances,
        p.fastMode,
        p.forceLoadBalance,
        p.replicateTrainingData,
        p.singleNodeMode,
        p.shuffleTrainingData,
        p.missingValuesHandling,
        p.sparse,
        p.colMajor,
        p.averageActivation,
        p.sparsityBeta,
        p.maxCategoricalFeatures,
        p.reproducible,
        p.exportWeightsAndBiases,
        p.miniBatchSize,
        p.elasticAveraging,
        p.elasticAveragingMovingRate,
        p.elasticAveragingRegularization,
        (p.pretrainedAutoencoder == null? null : p.pretrainedAutoencoder.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for GLM model.
     */
    public static Call trainGlm(Grid z, GLMParametersV3 p) {
      return z.trainGlm(
        p.seed,
        p.family,
        p.randFamily,
        p.tweedieVariancePower,
        p.dispersionLearningRate,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        p.influence,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.dispersionParameterMethod,
        p.randLink,
        p.startval,
        p.randomColumns,
        p.calcLike,
        p.generateVariableInflationFactors,
        p.intercept,
        p.buildNullModel,
        p.fixDispersionParameter,
        p.initDispersionParameter,
        p.hglm,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        (p.linearConstraints == null? null : p.linearConstraints.name),
        p.maxActivePredictors,
        p.interactions,
        p.interactionPairs,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.fixTweedieVariancePower,
        p.removeCollinearColumns,
        p.dispersionEpsilon,
        p.tweedieEpsilon,
        p.maxIterationsDispersion,
        p.generateScoringHistory,
        p.initOptimalGlm,
        p.separateLinearBeta,
        p.constraintEta0,
        p.constraintTau,
        p.constraintAlpha,
        p.constraintBeta,
        p.constraintC0,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for GLM model.
     */
    public static Call resumeGlm(Grid z, GLMParametersV3 p) {
      return z.resumeGlm(
        p.seed,
        p.family,
        p.randFamily,
        p.tweedieVariancePower,
        p.dispersionLearningRate,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        p.influence,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.dispersionParameterMethod,
        p.randLink,
        p.startval,
        p.randomColumns,
        p.calcLike,
        p.generateVariableInflationFactors,
        p.intercept,
        p.buildNullModel,
        p.fixDispersionParameter,
        p.initDispersionParameter,
        p.hglm,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        (p.linearConstraints == null? null : p.linearConstraints.name),
        p.maxActivePredictors,
        p.interactions,
        p.interactionPairs,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.fixTweedieVariancePower,
        p.removeCollinearColumns,
        p.dispersionEpsilon,
        p.tweedieEpsilon,
        p.maxIterationsDispersion,
        p.generateScoringHistory,
        p.initOptimalGlm,
        p.separateLinearBeta,
        p.constraintEta0,
        p.constraintTau,
        p.constraintAlpha,
        p.constraintBeta,
        p.constraintC0,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for GLRM model.
     */
    public static Call trainGlrm(Grid z, GLRMParametersV3 p) {
      return z.trainGlrm(
        p.transform,
        p.k,
        p.loss,
        p.multiLoss,
        p.lossByCol,
        p.lossByColIdx,
        p.period,
        p.regularizationX,
        p.regularizationY,
        p.gammaX,
        p.gammaY,
        p.maxIterations,
        p.maxUpdates,
        p.initStepSize,
        p.minStepSize,
        p.seed,
        p.init,
        p.svdMethod,
        (p.userY == null? null : p.userY.name),
        (p.userX == null? null : p.userX.name),
        p.loadingName,
        p.representationName,
        p.expandUserY,
        p.imputeOriginal,
        p.recoverSvd,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for GLRM model.
     */
    public static Call resumeGlrm(Grid z, GLRMParametersV3 p) {
      return z.resumeGlrm(
        p.transform,
        p.k,
        p.loss,
        p.multiLoss,
        p.lossByCol,
        p.lossByColIdx,
        p.period,
        p.regularizationX,
        p.regularizationY,
        p.gammaX,
        p.gammaY,
        p.maxIterations,
        p.maxUpdates,
        p.initStepSize,
        p.minStepSize,
        p.seed,
        p.init,
        p.svdMethod,
        (p.userY == null? null : p.userY.name),
        (p.userX == null? null : p.userX.name),
        p.loadingName,
        p.representationName,
        p.expandUserY,
        p.imputeOriginal,
        p.recoverSvd,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for KMeans model.
     */
    public static Call trainKmeans(Grid z, KMeansParametersV3 p) {
      return z.trainKmeans(
        (p.userPoints == null? null : p.userPoints.name),
        p.maxIterations,
        p.standardize,
        p.seed,
        p.init,
        p.estimateK,
        p.clusterSizeConstraints,
        p.k,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for KMeans model.
     */
    public static Call resumeKmeans(Grid z, KMeansParametersV3 p) {
      return z.resumeKmeans(
        (p.userPoints == null? null : p.userPoints.name),
        p.maxIterations,
        p.standardize,
        p.seed,
        p.init,
        p.estimateK,
        p.clusterSizeConstraints,
        p.k,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for NaiveBayes model.
     */
    public static Call trainNaivebayes(Grid z, NaiveBayesParametersV3 p) {
      return z.trainNaivebayes(
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.laplace,
        p.minSdev,
        p.epsSdev,
        p.minProb,
        p.epsProb,
        p.computeMetrics,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for NaiveBayes model.
     */
    public static Call resumeNaivebayes(Grid z, NaiveBayesParametersV3 p) {
      return z.resumeNaivebayes(
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.laplace,
        p.minSdev,
        p.epsSdev,
        p.minProb,
        p.epsProb,
        p.computeMetrics,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for PCA model.
     */
    public static Call trainPca(Grid z, PCAParametersV3 p) {
      return z.trainPca(
        p.transform,
        p.pcaMethod,
        p.pcaImpl,
        p.k,
        p.maxIterations,
        p.seed,
        p.useAllFactorLevels,
        p.computeMetrics,
        p.imputeMissing,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for PCA model.
     */
    public static Call resumePca(Grid z, PCAParametersV3 p) {
      return z.resumePca(
        p.transform,
        p.pcaMethod,
        p.pcaImpl,
        p.k,
        p.maxIterations,
        p.seed,
        p.useAllFactorLevels,
        p.computeMetrics,
        p.imputeMissing,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for SVD model.
     */
    public static Call trainSvd(Grid z, SVDParametersV99 p) {
      return z.trainSvd(
        p.transform,
        p.svdMethod,
        p.nv,
        p.maxIterations,
        p.seed,
        p.keepU,
        p.uName,
        p.useAllFactorLevels,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for SVD model.
     */
    public static Call resumeSvd(Grid z, SVDParametersV99 p) {
      return z.resumeSvd(
        p.transform,
        p.svdMethod,
        p.nv,
        p.maxIterations,
        p.seed,
        p.keepU,
        p.uName,
        p.useAllFactorLevels,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for DRF model.
     */
    public static Call trainDrf(Grid z, DRFParametersV3 p) {
      return z.trainDrf(
        p.mtries,
        p.binomialDoubleTrees,
        p.sampleRate,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for DRF model.
     */
    public static Call resumeDrf(Grid z, DRFParametersV3 p) {
      return z.resumeDrf(
        p.mtries,
        p.binomialDoubleTrees,
        p.sampleRate,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for GBM model.
     */
    public static Call trainGbm(Grid z, GBMParametersV3 p) {
      return z.trainGbm(
        p.learnRate,
        p.learnRateAnnealing,
        p.sampleRate,
        p.colSampleRate,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.predNoiseBandwidth,
        p.interactionConstraints,
        p.autoRebalance,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for GBM model.
     */
    public static Call resumeGbm(Grid z, GBMParametersV3 p) {
      return z.resumeGbm(
        p.learnRate,
        p.learnRateAnnealing,
        p.sampleRate,
        p.colSampleRate,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.predNoiseBandwidth,
        p.interactionConstraints,
        p.autoRebalance,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for IsolationForest model.
     */
    public static Call trainIsolationforest(Grid z, IsolationForestParametersV3 p) {
      return z.trainIsolationforest(
        p.sampleSize,
        p.sampleRate,
        p.mtries,
        p.contamination,
        (p.validationResponseColumn == null? null : p.validationResponseColumn.columnName),
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for IsolationForest model.
     */
    public static Call resumeIsolationforest(Grid z, IsolationForestParametersV3 p) {
      return z.resumeIsolationforest(
        p.sampleSize,
        p.sampleRate,
        p.mtries,
        p.contamination,
        (p.validationResponseColumn == null? null : p.validationResponseColumn.columnName),
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for ExtendedIsolationForest model.
     */
    public static Call trainExtendedisolationforest(Grid z, ExtendedIsolationForestParametersV3 p) {
      return z.trainExtendedisolationforest(
        p.ntrees,
        p.sampleSize,
        p.extensionLevel,
        p.seed,
        p.scoreTreeInterval,
        p.disableTrainingMetrics,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for ExtendedIsolationForest model.
     */
    public static Call resumeExtendedisolationforest(Grid z, ExtendedIsolationForestParametersV3 p) {
      return z.resumeExtendedisolationforest(
        p.ntrees,
        p.sampleSize,
        p.extensionLevel,
        p.seed,
        p.scoreTreeInterval,
        p.disableTrainingMetrics,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for Aggregator model.
     */
    public static Call trainAggregator(Grid z, AggregatorParametersV99 p) {
      return z.trainAggregator(
        p.transform,
        p.pcaMethod,
        p.k,
        p.maxIterations,
        p.targetNumExemplars,
        p.relTolNumExemplars,
        p.seed,
        p.useAllFactorLevels,
        p.saveMappingFrame,
        p.numIterationWithoutNewExemplar,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for Aggregator model.
     */
    public static Call resumeAggregator(Grid z, AggregatorParametersV99 p) {
      return z.resumeAggregator(
        p.transform,
        p.pcaMethod,
        p.k,
        p.maxIterations,
        p.targetNumExemplars,
        p.relTolNumExemplars,
        p.seed,
        p.useAllFactorLevels,
        p.saveMappingFrame,
        p.numIterationWithoutNewExemplar,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for Word2Vec model.
     */
    public static Call trainWord2vec(Grid z, Word2VecParametersV3 p) {
      return z.trainWord2vec(
        p.vecSize,
        p.windowSize,
        p.sentSampleRate,
        p.normModel,
        p.epochs,
        p.minWordFreq,
        p.initLearningRate,
        p.wordModel,
        (p.preTrained == null? null : p.preTrained.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for Word2Vec model.
     */
    public static Call resumeWord2vec(Grid z, Word2VecParametersV3 p) {
      return z.resumeWord2vec(
        p.vecSize,
        p.windowSize,
        p.sentSampleRate,
        p.normModel,
        p.epochs,
        p.minWordFreq,
        p.initLearningRate,
        p.wordModel,
        (p.preTrained == null? null : p.preTrained.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for StackedEnsemble model.
     */
    public static Call trainStackedensemble(Grid z, StackedEnsembleParametersV99 p) {
      return z.trainStackedensemble(
        (p.baseModels == null? null : keyArrayToStringArray(p.baseModels)),
        p.metalearnerAlgorithm,
        p.metalearnerNfolds,
        p.metalearnerFoldAssignment,
        (p.metalearnerFoldColumn == null? null : p.metalearnerFoldColumn.columnName),
        p.metalearnerTransform,
        p.keepLeveloneFrame,
        p.metalearnerParams,
        (p.blendingFrame == null? null : p.blendingFrame.name),
        p.seed,
        p.scoreTrainingSamples,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for StackedEnsemble model.
     */
    public static Call resumeStackedensemble(Grid z, StackedEnsembleParametersV99 p) {
      return z.resumeStackedensemble(
        (p.baseModels == null? null : keyArrayToStringArray(p.baseModels)),
        p.metalearnerAlgorithm,
        p.metalearnerNfolds,
        p.metalearnerFoldAssignment,
        (p.metalearnerFoldColumn == null? null : p.metalearnerFoldColumn.columnName),
        p.metalearnerTransform,
        p.keepLeveloneFrame,
        p.metalearnerParams,
        (p.blendingFrame == null? null : p.blendingFrame.name),
        p.seed,
        p.scoreTrainingSamples,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for CoxPH model.
     */
    public static Call trainCoxph(Grid z, CoxPHParametersV3 p) {
      return z.trainCoxph(
        (p.startColumn == null? null : p.startColumn.columnName),
        (p.stopColumn == null? null : p.stopColumn.columnName),
        p.stratifyBy,
        p.ties,
        p.init,
        p.lreMin,
        p.maxIterations,
        p.interactionsOnly,
        p.interactions,
        p.interactionPairs,
        p.useAllFactorLevels,
        p.singleNodeMode,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for CoxPH model.
     */
    public static Call resumeCoxph(Grid z, CoxPHParametersV3 p) {
      return z.resumeCoxph(
        (p.startColumn == null? null : p.startColumn.columnName),
        (p.stopColumn == null? null : p.stopColumn.columnName),
        p.stratifyBy,
        p.ties,
        p.init,
        p.lreMin,
        p.maxIterations,
        p.interactionsOnly,
        p.interactions,
        p.interactionPairs,
        p.useAllFactorLevels,
        p.singleNodeMode,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for Generic model.
     */
    public static Call trainGeneric(Grid z, GenericParametersV3 p) {
      return z.trainGeneric(
        p.path,
        (p.modelKey == null? null : p.modelKey.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for Generic model.
     */
    public static Call resumeGeneric(Grid z, GenericParametersV3 p) {
      return z.resumeGeneric(
        p.path,
        (p.modelKey == null? null : p.modelKey.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for GAM model.
     */
    public static Call trainGam(Grid z, GAMParametersV3 p) {
      return z.trainGam(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.startval,
        p.lambdaSearch,
        p.earlyStopping,
        p.nlambdas,
        p.standardize,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.intercept,
        p.prior,
        p.coldStart,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        p.maxActivePredictors,
        p.interactions,
        p.interactionPairs,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.removeCollinearColumns,
        p.storeKnotLocations,
        p.numKnots,
        p.splineOrders,
        p.splinesNonNegative,
        p.gamColumns,
        p.scale,
        p.bs,
        p.keepGamCols,
        p.standardizeTpGamCols,
        p.scaleTpPenaltyMat,
        p.knotIds,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for GAM model.
     */
    public static Call resumeGam(Grid z, GAMParametersV3 p) {
      return z.resumeGam(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.startval,
        p.lambdaSearch,
        p.earlyStopping,
        p.nlambdas,
        p.standardize,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.intercept,
        p.prior,
        p.coldStart,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        p.maxActivePredictors,
        p.interactions,
        p.interactionPairs,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.removeCollinearColumns,
        p.storeKnotLocations,
        p.numKnots,
        p.splineOrders,
        p.splinesNonNegative,
        p.gamColumns,
        p.scale,
        p.bs,
        p.keepGamCols,
        p.standardizeTpGamCols,
        p.scaleTpPenaltyMat,
        p.knotIds,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for ANOVAGLM model.
     */
    public static Call trainAnovaglm(Grid z, ANOVAGLMParametersV3 p) {
      return z.trainAnovaglm(
        p.seed,
        p.standardize,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.solver,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.computePValues,
        p.maxIterations,
        p.link,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.highestInteractionTerm,
        p.type,
        p.earlyStopping,
        p.saveTransformedFramekeys,
        p.nparallelism,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for ANOVAGLM model.
     */
    public static Call resumeAnovaglm(Grid z, ANOVAGLMParametersV3 p) {
      return z.resumeAnovaglm(
        p.seed,
        p.standardize,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.solver,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.computePValues,
        p.maxIterations,
        p.link,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.highestInteractionTerm,
        p.type,
        p.earlyStopping,
        p.saveTransformedFramekeys,
        p.nparallelism,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for PSVM model.
     */
    public static Call trainPsvm(Grid z, PSVMParametersV3 p) {
      return z.trainPsvm(
        p.hyperParam,
        p.kernelType,
        p.gamma,
        p.rankRatio,
        p.positiveWeight,
        p.negativeWeight,
        p.disableTrainingMetrics,
        p.svThreshold,
        p.maxIterations,
        p.factThreshold,
        p.feasibleThreshold,
        p.surrogateGapThreshold,
        p.muFactor,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for PSVM model.
     */
    public static Call resumePsvm(Grid z, PSVMParametersV3 p) {
      return z.resumePsvm(
        p.hyperParam,
        p.kernelType,
        p.gamma,
        p.rankRatio,
        p.positiveWeight,
        p.negativeWeight,
        p.disableTrainingMetrics,
        p.svThreshold,
        p.maxIterations,
        p.factThreshold,
        p.feasibleThreshold,
        p.surrogateGapThreshold,
        p.muFactor,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for RuleFit model.
     */
    public static Call trainRulefit(Grid z, RuleFitParametersV3 p) {
      return z.trainRulefit(
        p.seed,
        p.algorithm,
        p.minRuleLength,
        p.maxRuleLength,
        p.maxNumRules,
        p.modelType,
        p.ruleGenerationNtrees,
        p.removeDuplicates,
        p.lambda,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for RuleFit model.
     */
    public static Call resumeRulefit(Grid z, RuleFitParametersV3 p) {
      return z.resumeRulefit(
        p.seed,
        p.algorithm,
        p.minRuleLength,
        p.maxRuleLength,
        p.maxNumRules,
        p.modelType,
        p.ruleGenerationNtrees,
        p.removeDuplicates,
        p.lambda,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for UpliftDRF model.
     */
    public static Call trainUpliftdrf(Grid z, UpliftDRFParametersV3 p) {
      return z.trainUpliftdrf(
        p.mtries,
        p.sampleRate,
        p.treatmentColumn,
        p.upliftMetric,
        p.auucType,
        p.auucNbins,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for UpliftDRF model.
     */
    public static Call resumeUpliftdrf(Grid z, UpliftDRFParametersV3 p) {
      return z.resumeUpliftdrf(
        p.mtries,
        p.sampleRate,
        p.treatmentColumn,
        p.upliftMetric,
        p.auucType,
        p.auucNbins,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for ModelSelection model.
     */
    public static Call trainModelselection(Grid z, ModelSelectionParametersV3 p) {
      return z.trainModelselection(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.multinodeMode,
        p.buildGlmModel,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.startval,
        p.calcLike,
        p.mode,
        p.intercept,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        p.maxActivePredictors,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.removeCollinearColumns,
        p.maxPredictorNumber,
        p.minPredictorNumber,
        p.nparallelism,
        p.pValuesThreshold,
        p.influence,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for ModelSelection model.
     */
    public static Call resumeModelselection(Grid z, ModelSelectionParametersV3 p) {
      return z.resumeModelselection(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.multinodeMode,
        p.buildGlmModel,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.startval,
        p.calcLike,
        p.mode,
        p.intercept,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        p.maxActivePredictors,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.removeCollinearColumns,
        p.maxPredictorNumber,
        p.minPredictorNumber,
        p.nparallelism,
        p.pValuesThreshold,
        p.influence,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for IsotonicRegression model.
     */
    public static Call trainIsotonicregression(Grid z, IsotonicRegressionParametersV3 p) {
      return z.trainIsotonicregression(
        p.outOfBounds,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for IsotonicRegression model.
     */
    public static Call resumeIsotonicregression(Grid z, IsotonicRegressionParametersV3 p) {
      return z.resumeIsotonicregression(
        p.outOfBounds,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for DT model.
     */
    public static Call trainDt(Grid z, DTParametersV3 p) {
      return z.trainDt(
        p.seed,
        p.maxDepth,
        p.minRows,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for DT model.
     */
    public static Call resumeDt(Grid z, DTParametersV3 p) {
      return z.resumeDt(
        p.seed,
        p.maxDepth,
        p.minRows,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Run grid search for AdaBoost model.
     */
    public static Call trainAdaboost(Grid z, AdaBoostParametersV3 p) {
      return z.trainAdaboost(
        p.nlearners,
        p.weakLearner,
        p.learnRate,
        p.weakLearnerParams,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Resume grid search for AdaBoost model.
     */
    public static Call resumeAdaboost(Grid z, AdaBoostParametersV3 p) {
      return z.resumeAdaboost(
        p.nlearners,
        p.weakLearner,
        p.learnRate,
        p.weakLearnerParams,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }

    /**
     * Return an array of Strings for an array of keys.
     */
    public static String[] keyArrayToStringArray(KeyV3[] keys) {
      if (keys == null) return null;
      String[] ids = new String[keys.length];
      int i = 0;
      for (KeyV3 key : keys) ids[i++] = key.name;
      return ids;
    }
  }

}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy