com.pulumi.azurenative.machinelearningservices.inputs.ImageModelDistributionSettingsClassificationArgs

// *** WARNING: this file was generated by pulumi-java-gen. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***

package com.pulumi.azurenative.machinelearningservices.inputs;

import com.pulumi.core.Output;
import com.pulumi.core.annotations.Import;
import java.lang.String;
import java.util.Objects;
import java.util.Optional;
import javax.annotation.Nullable;


/**
 * Distribution expressions to sweep over values of model settings.
 * Some examples are:
 * <pre>
 * ModelName = "choice('seresnext', 'resnest50')";
 * LearningRate = "uniform(0.001, 0.01)";
 * LayersToFreeze = "choice(0, 2)";
 * </pre>
 * 
 */
public final class ImageModelDistributionSettingsClassificationArgs extends com.pulumi.resources.ResourceArgs {

    public static final ImageModelDistributionSettingsClassificationArgs Empty = new ImageModelDistributionSettingsClassificationArgs();

    /**
     * Enable AMSGrad when optimizer is 'adam' or 'adamw'.
     * 
     */
    @Import(name="amsGradient")
    private @Nullable Output<String> amsGradient;

    /**
     * @return Enable AMSGrad when optimizer is 'adam' or 'adamw'.
     * 
     */
    public Optional<Output<String>> amsGradient() {
        return Optional.ofNullable(this.amsGradient);
    }

    /**
     * Settings for using Augmentations.
     * 
     */
    @Import(name="augmentations")
    private @Nullable Output<String> augmentations;

    /**
     * @return Settings for using Augmentations.
     * 
     */
    public Optional<Output<String>> augmentations() {
        return Optional.ofNullable(this.augmentations);
    }

    /**
     * Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="beta1")
    private @Nullable Output<String> beta1;

    /**
     * @return Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> beta1() {
        return Optional.ofNullable(this.beta1);
    }

    /**
     * Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="beta2")
    private @Nullable Output<String> beta2;

    /**
     * @return Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> beta2() {
        return Optional.ofNullable(this.beta2);
    }

    /**
     * Whether to use distributed training.
     * 
     */
    @Import(name="distributed")
    private @Nullable Output<String> distributed;

    /**
     * @return Whether to use distributed training.
     * 
     */
    public Optional<Output<String>> distributed() {
        return Optional.ofNullable(this.distributed);
    }

    /**
     * Enable early stopping logic during training.
     * 
     */
    @Import(name="earlyStopping")
    private @Nullable Output<String> earlyStopping;

    /**
     * @return Enable early stopping logic during training.
     * 
     */
    public Optional<Output<String>> earlyStopping() {
        return Optional.ofNullable(this.earlyStopping);
    }

    /**
     * Minimum number of epochs or validation evaluations to wait before primary metric improvement
     * is tracked for early stopping. Must be a positive integer.
     * 
     */
    @Import(name="earlyStoppingDelay")
    private @Nullable Output<String> earlyStoppingDelay;

    /**
     * @return Minimum number of epochs or validation evaluations to wait before primary metric improvement
     * is tracked for early stopping. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> earlyStoppingDelay() {
        return Optional.ofNullable(this.earlyStoppingDelay);
    }

    /**
     * Minimum number of epochs or validation evaluations with no primary metric improvement before
     * the run is stopped. Must be a positive integer.
     * 
     */
    @Import(name="earlyStoppingPatience")
    private @Nullable Output<String> earlyStoppingPatience;

    /**
     * @return Minimum number of epochs or validation evaluations with no primary metric improvement before
     * the run is stopped. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> earlyStoppingPatience() {
        return Optional.ofNullable(this.earlyStoppingPatience);
    }

    /**
     * Enable normalization when exporting ONNX model.
     * 
     */
    @Import(name="enableOnnxNormalization")
    private @Nullable Output<String> enableOnnxNormalization;

    /**
     * @return Enable normalization when exporting ONNX model.
     * 
     */
    public Optional<Output<String>> enableOnnxNormalization() {
        return Optional.ofNullable(this.enableOnnxNormalization);
    }

    /**
     * Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
     * 
     */
    @Import(name="evaluationFrequency")
    private @Nullable Output<String> evaluationFrequency;

    /**
     * @return Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> evaluationFrequency() {
        return Optional.ofNullable(this.evaluationFrequency);
    }

    /**
     * Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
     * updating the model weights while accumulating the gradients of those steps, and then using
     * the accumulated gradients to compute the weight updates. Must be a positive integer.
     * 
     */
    @Import(name="gradientAccumulationStep")
    private @Nullable Output<String> gradientAccumulationStep;

    /**
     * @return Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
     * updating the model weights while accumulating the gradients of those steps, and then using
     * the accumulated gradients to compute the weight updates. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> gradientAccumulationStep() {
        return Optional.ofNullable(this.gradientAccumulationStep);
    }

    /**
     * Number of layers to freeze for the model. Must be a positive integer.
     * For instance, passing 2 as value for 'seresnext' means
     * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
     * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    @Import(name="layersToFreeze")
    private @Nullable Output<String> layersToFreeze;

    /**
     * @return Number of layers to freeze for the model. Must be a positive integer.
     * For instance, passing 2 as value for 'seresnext' means
     * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
     * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    public Optional<Output<String>> layersToFreeze() {
        return Optional.ofNullable(this.layersToFreeze);
    }

    /**
     * Initial learning rate. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="learningRate")
    private @Nullable Output<String> learningRate;

    /**
     * @return Initial learning rate. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> learningRate() {
        return Optional.ofNullable(this.learningRate);
    }

    /**
     * Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
     * 
     */
    @Import(name="learningRateScheduler")
    private @Nullable Output<String> learningRateScheduler;

    /**
     * @return Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
     * 
     */
    public Optional<Output<String>> learningRateScheduler() {
        return Optional.ofNullable(this.learningRateScheduler);
    }

    /**
     * Name of the model to use for training.
     * For more information on the available models please visit the official documentation:
     * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    @Import(name="modelName")
    private @Nullable Output<String> modelName;

    /**
     * @return Name of the model to use for training.
     * For more information on the available models please visit the official documentation:
     * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    public Optional<Output<String>> modelName() {
        return Optional.ofNullable(this.modelName);
    }

    /**
     * Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="momentum")
    private @Nullable Output<String> momentum;

    /**
     * @return Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> momentum() {
        return Optional.ofNullable(this.momentum);
    }

    /**
     * Enable nesterov when optimizer is 'sgd'.
     * 
     */
    @Import(name="nesterov")
    private @Nullable Output<String> nesterov;

    /**
     * @return Enable nesterov when optimizer is 'sgd'.
     * 
     */
    public Optional<Output<String>> nesterov() {
        return Optional.ofNullable(this.nesterov);
    }

    /**
     * Number of training epochs. Must be a positive integer.
     * 
     */
    @Import(name="numberOfEpochs")
    private @Nullable Output<String> numberOfEpochs;

    /**
     * @return Number of training epochs. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> numberOfEpochs() {
        return Optional.ofNullable(this.numberOfEpochs);
    }

    /**
     * Number of data loader workers. Must be a non-negative integer.
     * 
     */
    @Import(name="numberOfWorkers")
    private @Nullable Output<String> numberOfWorkers;

    /**
     * @return Number of data loader workers. Must be a non-negative integer.
     * 
     */
    public Optional<Output<String>> numberOfWorkers() {
        return Optional.ofNullable(this.numberOfWorkers);
    }

    /**
     * Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
     * 
     */
    @Import(name="optimizer")
    private @Nullable Output<String> optimizer;

    /**
     * @return Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
     * 
     */
    public Optional<Output<String>> optimizer() {
        return Optional.ofNullable(this.optimizer);
    }

    /**
     * Random seed to be used when using deterministic training.
     * 
     */
    @Import(name="randomSeed")
    private @Nullable Output<String> randomSeed;

    /**
     * @return Random seed to be used when using deterministic training.
     * 
     */
    public Optional<Output<String>> randomSeed() {
        return Optional.ofNullable(this.randomSeed);
    }

    /**
     * Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="stepLRGamma")
    private @Nullable Output<String> stepLRGamma;

    /**
     * @return Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> stepLRGamma() {
        return Optional.ofNullable(this.stepLRGamma);
    }

    /**
     * Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
     * 
     */
    @Import(name="stepLRStepSize")
    private @Nullable Output<String> stepLRStepSize;

    /**
     * @return Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> stepLRStepSize() {
        return Optional.ofNullable(this.stepLRStepSize);
    }

    /**
     * Training batch size. Must be a positive integer.
     * 
     */
    @Import(name="trainingBatchSize")
    private @Nullable Output<String> trainingBatchSize;

    /**
     * @return Training batch size. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> trainingBatchSize() {
        return Optional.ofNullable(this.trainingBatchSize);
    }

    /**
     * Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
     * 
     */
    @Import(name="trainingCropSize")
    private @Nullable Output<String> trainingCropSize;

    /**
     * @return Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> trainingCropSize() {
        return Optional.ofNullable(this.trainingCropSize);
    }

    /**
     * Validation batch size. Must be a positive integer.
     * 
     */
    @Import(name="validationBatchSize")
    private @Nullable Output<String> validationBatchSize;

    /**
     * @return Validation batch size. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> validationBatchSize() {
        return Optional.ofNullable(this.validationBatchSize);
    }

    /**
     * Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
     * 
     */
    @Import(name="validationCropSize")
    private @Nullable Output<String> validationCropSize;

    /**
     * @return Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> validationCropSize() {
        return Optional.ofNullable(this.validationCropSize);
    }

    /**
     * Image size to which to resize before cropping for validation dataset. Must be a positive integer.
     * 
     */
    @Import(name="validationResizeSize")
    private @Nullable Output<String> validationResizeSize;

    /**
     * @return Image size to which to resize before cropping for validation dataset. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> validationResizeSize() {
        return Optional.ofNullable(this.validationResizeSize);
    }

    /**
     * Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="warmupCosineLRCycles")
    private @Nullable Output<String> warmupCosineLRCycles;

    /**
     * @return Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> warmupCosineLRCycles() {
        return Optional.ofNullable(this.warmupCosineLRCycles);
    }

    /**
     * Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
     * 
     */
    @Import(name="warmupCosineLRWarmupEpochs")
    private @Nullable Output<String> warmupCosineLRWarmupEpochs;

    /**
     * @return Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
     * 
     */
    public Optional<Output<String>> warmupCosineLRWarmupEpochs() {
        return Optional.ofNullable(this.warmupCosineLRWarmupEpochs);
    }

    /**
     * Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    @Import(name="weightDecay")
    private @Nullable Output<String> weightDecay;

    /**
     * @return Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<Output<String>> weightDecay() {
        return Optional.ofNullable(this.weightDecay);
    }

    /**
     * Weighted loss. The accepted values are 0 for no weighted loss,
     * 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be 0, 1 or 2.
     * 
     */
    @Import(name="weightedLoss")
    private @Nullable Output<String> weightedLoss;

    /**
     * @return Weighted loss. The accepted values are 0 for no weighted loss,
     * 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be 0, 1 or 2.
     * 
     */
    public Optional<Output<String>> weightedLoss() {
        return Optional.ofNullable(this.weightedLoss);
    }

    private ImageModelDistributionSettingsClassificationArgs() {}

    private ImageModelDistributionSettingsClassificationArgs(ImageModelDistributionSettingsClassificationArgs $) {
        this.amsGradient = $.amsGradient;
        this.augmentations = $.augmentations;
        this.beta1 = $.beta1;
        this.beta2 = $.beta2;
        this.distributed = $.distributed;
        this.earlyStopping = $.earlyStopping;
        this.earlyStoppingDelay = $.earlyStoppingDelay;
        this.earlyStoppingPatience = $.earlyStoppingPatience;
        this.enableOnnxNormalization = $.enableOnnxNormalization;
        this.evaluationFrequency = $.evaluationFrequency;
        this.gradientAccumulationStep = $.gradientAccumulationStep;
        this.layersToFreeze = $.layersToFreeze;
        this.learningRate = $.learningRate;
        this.learningRateScheduler = $.learningRateScheduler;
        this.modelName = $.modelName;
        this.momentum = $.momentum;
        this.nesterov = $.nesterov;
        this.numberOfEpochs = $.numberOfEpochs;
        this.numberOfWorkers = $.numberOfWorkers;
        this.optimizer = $.optimizer;
        this.randomSeed = $.randomSeed;
        this.stepLRGamma = $.stepLRGamma;
        this.stepLRStepSize = $.stepLRStepSize;
        this.trainingBatchSize = $.trainingBatchSize;
        this.trainingCropSize = $.trainingCropSize;
        this.validationBatchSize = $.validationBatchSize;
        this.validationCropSize = $.validationCropSize;
        this.validationResizeSize = $.validationResizeSize;
        this.warmupCosineLRCycles = $.warmupCosineLRCycles;
        this.warmupCosineLRWarmupEpochs = $.warmupCosineLRWarmupEpochs;
        this.weightDecay = $.weightDecay;
        this.weightedLoss = $.weightedLoss;
    }

    public static Builder builder() {
        return new Builder();
    }
    public static Builder builder(ImageModelDistributionSettingsClassificationArgs defaults) {
        return new Builder(defaults);
    }

    public static final class Builder {
        private ImageModelDistributionSettingsClassificationArgs $;

        public Builder() {
            $ = new ImageModelDistributionSettingsClassificationArgs();
        }

        public Builder(ImageModelDistributionSettingsClassificationArgs defaults) {
            $ = new ImageModelDistributionSettingsClassificationArgs(Objects.requireNonNull(defaults));
        }

        /**
         * @param amsGradient Enable AMSGrad when optimizer is 'adam' or 'adamw'.
         * 
         * @return builder
         * 
         */
        public Builder amsGradient(@Nullable Output<String> amsGradient) {
            $.amsGradient = amsGradient;
            return this;
        }

        /**
         * @param amsGradient Enable AMSGrad when optimizer is 'adam' or 'adamw'.
         * 
         * @return builder
         * 
         */
        public Builder amsGradient(String amsGradient) {
            return amsGradient(Output.of(amsGradient));
        }

        /**
         * @param augmentations Settings for using Augmentations.
         * 
         * @return builder
         * 
         */
        public Builder augmentations(@Nullable Output<String> augmentations) {
            $.augmentations = augmentations;
            return this;
        }

        /**
         * @param augmentations Settings for using Augmentations.
         * 
         * @return builder
         * 
         */
        public Builder augmentations(String augmentations) {
            return augmentations(Output.of(augmentations));
        }

        /**
         * @param beta1 Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder beta1(@Nullable Output<String> beta1) {
            $.beta1 = beta1;
            return this;
        }

        /**
         * @param beta1 Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder beta1(String beta1) {
            return beta1(Output.of(beta1));
        }

        /**
         * @param beta2 Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder beta2(@Nullable Output<String> beta2) {
            $.beta2 = beta2;
            return this;
        }

        /**
         * @param beta2 Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder beta2(String beta2) {
            return beta2(Output.of(beta2));
        }

        /**
         * @param distributed Whether to use distributed training.
         * 
         * @return builder
         * 
         */
        public Builder distributed(@Nullable Output<String> distributed) {
            $.distributed = distributed;
            return this;
        }

        /**
         * @param distributed Whether to use distributed training.
         * 
         * @return builder
         * 
         */
        public Builder distributed(String distributed) {
            return distributed(Output.of(distributed));
        }

        /**
         * @param earlyStopping Enable early stopping logic during training.
         * 
         * @return builder
         * 
         */
        public Builder earlyStopping(@Nullable Output<String> earlyStopping) {
            $.earlyStopping = earlyStopping;
            return this;
        }

        /**
         * @param earlyStopping Enable early stopping logic during training.
         * 
         * @return builder
         * 
         */
        public Builder earlyStopping(String earlyStopping) {
            return earlyStopping(Output.of(earlyStopping));
        }

        /**
         * @param earlyStoppingDelay Minimum number of epochs or validation evaluations to wait before primary metric improvement
         * is tracked for early stopping. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder earlyStoppingDelay(@Nullable Output<String> earlyStoppingDelay) {
            $.earlyStoppingDelay = earlyStoppingDelay;
            return this;
        }

        /**
         * @param earlyStoppingDelay Minimum number of epochs or validation evaluations to wait before primary metric improvement
         * is tracked for early stopping. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder earlyStoppingDelay(String earlyStoppingDelay) {
            return earlyStoppingDelay(Output.of(earlyStoppingDelay));
        }

        /**
         * @param earlyStoppingPatience Minimum number of epochs or validation evaluations with no primary metric improvement before
         * the run is stopped. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder earlyStoppingPatience(@Nullable Output<String> earlyStoppingPatience) {
            $.earlyStoppingPatience = earlyStoppingPatience;
            return this;
        }

        /**
         * @param earlyStoppingPatience Minimum number of epochs or validation evaluations with no primary metric improvement before
         * the run is stopped. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder earlyStoppingPatience(String earlyStoppingPatience) {
            return earlyStoppingPatience(Output.of(earlyStoppingPatience));
        }

        /**
         * @param enableOnnxNormalization Enable normalization when exporting ONNX model.
         * 
         * @return builder
         * 
         */
        public Builder enableOnnxNormalization(@Nullable Output<String> enableOnnxNormalization) {
            $.enableOnnxNormalization = enableOnnxNormalization;
            return this;
        }

        /**
         * @param enableOnnxNormalization Enable normalization when exporting ONNX model.
         * 
         * @return builder
         * 
         */
        public Builder enableOnnxNormalization(String enableOnnxNormalization) {
            return enableOnnxNormalization(Output.of(enableOnnxNormalization));
        }

        /**
         * @param evaluationFrequency Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder evaluationFrequency(@Nullable Output<String> evaluationFrequency) {
            $.evaluationFrequency = evaluationFrequency;
            return this;
        }

        /**
         * @param evaluationFrequency Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder evaluationFrequency(String evaluationFrequency) {
            return evaluationFrequency(Output.of(evaluationFrequency));
        }

        /**
         * @param gradientAccumulationStep Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
         * updating the model weights while accumulating the gradients of those steps, and then using
         * the accumulated gradients to compute the weight updates. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder gradientAccumulationStep(@Nullable Output<String> gradientAccumulationStep) {
            $.gradientAccumulationStep = gradientAccumulationStep;
            return this;
        }

        /**
         * @param gradientAccumulationStep Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
         * updating the model weights while accumulating the gradients of those steps, and then using
         * the accumulated gradients to compute the weight updates. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder gradientAccumulationStep(String gradientAccumulationStep) {
            return gradientAccumulationStep(Output.of(gradientAccumulationStep));
        }

        /**
         * @param layersToFreeze Number of layers to freeze for the model. Must be a positive integer.
         * For instance, passing 2 as value for 'seresnext' means
         * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
         * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
         * 
         * @return builder
         * 
         */
        public Builder layersToFreeze(@Nullable Output<String> layersToFreeze) {
            $.layersToFreeze = layersToFreeze;
            return this;
        }

        /**
         * @param layersToFreeze Number of layers to freeze for the model. Must be a positive integer.
         * For instance, passing 2 as value for 'seresnext' means
         * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
         * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
         * 
         * @return builder
         * 
         */
        public Builder layersToFreeze(String layersToFreeze) {
            return layersToFreeze(Output.of(layersToFreeze));
        }

        /**
         * @param learningRate Initial learning rate. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder learningRate(@Nullable Output<String> learningRate) {
            $.learningRate = learningRate;
            return this;
        }

        /**
         * @param learningRate Initial learning rate. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder learningRate(String learningRate) {
            return learningRate(Output.of(learningRate));
        }

        /**
         * @param learningRateScheduler Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
         * 
         * @return builder
         * 
         */
        public Builder learningRateScheduler(@Nullable Output<String> learningRateScheduler) {
            $.learningRateScheduler = learningRateScheduler;
            return this;
        }

        /**
         * @param learningRateScheduler Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
         * 
         * @return builder
         * 
         */
        public Builder learningRateScheduler(String learningRateScheduler) {
            return learningRateScheduler(Output.of(learningRateScheduler));
        }

        /**
         * @param modelName Name of the model to use for training.
         * For more information on the available models please visit the official documentation:
         * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
         * 
         * @return builder
         * 
         */
        public Builder modelName(@Nullable Output<String> modelName) {
            $.modelName = modelName;
            return this;
        }

        /**
         * @param modelName Name of the model to use for training.
         * For more information on the available models please visit the official documentation:
         * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
         * 
         * @return builder
         * 
         */
        public Builder modelName(String modelName) {
            return modelName(Output.of(modelName));
        }

        /**
         * @param momentum Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder momentum(@Nullable Output<String> momentum) {
            $.momentum = momentum;
            return this;
        }

        /**
         * @param momentum Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder momentum(String momentum) {
            return momentum(Output.of(momentum));
        }

        /**
         * @param nesterov Enable nesterov when optimizer is 'sgd'.
         * 
         * @return builder
         * 
         */
        public Builder nesterov(@Nullable Output<String> nesterov) {
            $.nesterov = nesterov;
            return this;
        }

        /**
         * @param nesterov Enable nesterov when optimizer is 'sgd'.
         * 
         * @return builder
         * 
         */
        public Builder nesterov(String nesterov) {
            return nesterov(Output.of(nesterov));
        }

        /**
         * @param numberOfEpochs Number of training epochs. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder numberOfEpochs(@Nullable Output<String> numberOfEpochs) {
            $.numberOfEpochs = numberOfEpochs;
            return this;
        }

        /**
         * @param numberOfEpochs Number of training epochs. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder numberOfEpochs(String numberOfEpochs) {
            return numberOfEpochs(Output.of(numberOfEpochs));
        }

        /**
         * @param numberOfWorkers Number of data loader workers. Must be a non-negative integer.
         * 
         * @return builder
         * 
         */
        public Builder numberOfWorkers(@Nullable Output<String> numberOfWorkers) {
            $.numberOfWorkers = numberOfWorkers;
            return this;
        }

        /**
         * @param numberOfWorkers Number of data loader workers. Must be a non-negative integer.
         * 
         * @return builder
         * 
         */
        public Builder numberOfWorkers(String numberOfWorkers) {
            return numberOfWorkers(Output.of(numberOfWorkers));
        }

        /**
         * @param optimizer Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
         * 
         * @return builder
         * 
         */
        public Builder optimizer(@Nullable Output<String> optimizer) {
            $.optimizer = optimizer;
            return this;
        }

        /**
         * @param optimizer Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
         * 
         * @return builder
         * 
         */
        public Builder optimizer(String optimizer) {
            return optimizer(Output.of(optimizer));
        }

        /**
         * @param randomSeed Random seed to be used when using deterministic training.
         * 
         * @return builder
         * 
         */
        public Builder randomSeed(@Nullable Output<String> randomSeed) {
            $.randomSeed = randomSeed;
            return this;
        }

        /**
         * @param randomSeed Random seed to be used when using deterministic training.
         * 
         * @return builder
         * 
         */
        public Builder randomSeed(String randomSeed) {
            return randomSeed(Output.of(randomSeed));
        }

        /**
         * @param stepLRGamma Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder stepLRGamma(@Nullable Output<String> stepLRGamma) {
            $.stepLRGamma = stepLRGamma;
            return this;
        }

        /**
         * @param stepLRGamma Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder stepLRGamma(String stepLRGamma) {
            return stepLRGamma(Output.of(stepLRGamma));
        }

        /**
         * @param stepLRStepSize Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder stepLRStepSize(@Nullable Output<String> stepLRStepSize) {
            $.stepLRStepSize = stepLRStepSize;
            return this;
        }

        /**
         * @param stepLRStepSize Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder stepLRStepSize(String stepLRStepSize) {
            return stepLRStepSize(Output.of(stepLRStepSize));
        }

        /**
         * @param trainingBatchSize Training batch size. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder trainingBatchSize(@Nullable Output<String> trainingBatchSize) {
            $.trainingBatchSize = trainingBatchSize;
            return this;
        }

        /**
         * @param trainingBatchSize Training batch size. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder trainingBatchSize(String trainingBatchSize) {
            return trainingBatchSize(Output.of(trainingBatchSize));
        }

        /**
         * @param trainingCropSize Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder trainingCropSize(@Nullable Output<String> trainingCropSize) {
            $.trainingCropSize = trainingCropSize;
            return this;
        }

        /**
         * @param trainingCropSize Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder trainingCropSize(String trainingCropSize) {
            return trainingCropSize(Output.of(trainingCropSize));
        }

        /**
         * @param validationBatchSize Validation batch size. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder validationBatchSize(@Nullable Output<String> validationBatchSize) {
            $.validationBatchSize = validationBatchSize;
            return this;
        }

        /**
         * @param validationBatchSize Validation batch size. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder validationBatchSize(String validationBatchSize) {
            return validationBatchSize(Output.of(validationBatchSize));
        }

        /**
         * @param validationCropSize Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder validationCropSize(@Nullable Output<String> validationCropSize) {
            $.validationCropSize = validationCropSize;
            return this;
        }

        /**
         * @param validationCropSize Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder validationCropSize(String validationCropSize) {
            return validationCropSize(Output.of(validationCropSize));
        }

        /**
         * @param validationResizeSize Image size to which to resize before cropping for validation dataset. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder validationResizeSize(@Nullable Output<String> validationResizeSize) {
            $.validationResizeSize = validationResizeSize;
            return this;
        }

        /**
         * @param validationResizeSize Image size to which to resize before cropping for validation dataset. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder validationResizeSize(String validationResizeSize) {
            return validationResizeSize(Output.of(validationResizeSize));
        }

        /**
         * @param warmupCosineLRCycles Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder warmupCosineLRCycles(@Nullable Output<String> warmupCosineLRCycles) {
            $.warmupCosineLRCycles = warmupCosineLRCycles;
            return this;
        }

        /**
         * @param warmupCosineLRCycles Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder warmupCosineLRCycles(String warmupCosineLRCycles) {
            return warmupCosineLRCycles(Output.of(warmupCosineLRCycles));
        }

        /**
         * @param warmupCosineLRWarmupEpochs Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder warmupCosineLRWarmupEpochs(@Nullable Output<String> warmupCosineLRWarmupEpochs) {
            $.warmupCosineLRWarmupEpochs = warmupCosineLRWarmupEpochs;
            return this;
        }

        /**
         * @param warmupCosineLRWarmupEpochs Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
         * 
         * @return builder
         * 
         */
        public Builder warmupCosineLRWarmupEpochs(String warmupCosineLRWarmupEpochs) {
            return warmupCosineLRWarmupEpochs(Output.of(warmupCosineLRWarmupEpochs));
        }

        /**
         * @param weightDecay Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder weightDecay(@Nullable Output<String> weightDecay) {
            $.weightDecay = weightDecay;
            return this;
        }

        /**
         * @param weightDecay Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
         * 
         * @return builder
         * 
         */
        public Builder weightDecay(String weightDecay) {
            return weightDecay(Output.of(weightDecay));
        }

        /**
         * @param weightedLoss Weighted loss. The accepted values are 0 for no weighted loss,
         * 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be 0, 1 or 2.
         * 
         * @return builder
         * 
         */
        public Builder weightedLoss(@Nullable Output<String> weightedLoss) {
            $.weightedLoss = weightedLoss;
            return this;
        }

        /**
         * @param weightedLoss Weighted loss. The accepted values are 0 for no weighted loss,
         * 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be 0, 1 or 2.
         * 
         * @return builder
         * 
         */
        public Builder weightedLoss(String weightedLoss) {
            return weightedLoss(Output.of(weightedLoss));
        }

        public ImageModelDistributionSettingsClassificationArgs build() {
            return $;
        }
    }

}
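
A minimal usage sketch (the builder and setter names come from the class above; the variable name and the distribution expression values are illustrative assumptions, and the class is assumed to be imported):

// Define a hypothetical hyperparameter search space for an image classification sweep.
// Every property of this Args class accepts a string distribution expression,
// e.g. choice(...) or uniform(min, max), which the sweep explores.
ImageModelDistributionSettingsClassificationArgs searchSpace =
    ImageModelDistributionSettingsClassificationArgs.builder()
        .modelName("choice('seresnext', 'resnest50')")
        .learningRate("uniform(0.001, 0.01)")
        .layersToFreeze("choice(0, 2)")
        .optimizer("choice('sgd', 'adamw')")
        .build();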



