// *** WARNING: this file was generated by pulumi-java-gen. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***

package com.pulumi.azurenative.machinelearningservices.outputs;

import com.pulumi.core.annotations.CustomType;
import java.lang.String;
import java.util.Objects;
import java.util.Optional;
import javax.annotation.Nullable;

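/**
 * Distribution (sweep) settings for AutoML image classification. Each setting is represented as a
 * string so it can carry a distribution expression (for example {@code choice(...)} or
 * {@code uniform(...)}) rather than a single literal value.
 */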
@CustomType
public final class ImageModelDistributionSettingsClassificationResponse {
    /**
     * @return Enable AMSGrad when optimizer is 'adam' or 'adamw'.
     * 
     */
    private @Nullable String amsGradient;
    /**
     * @return Settings for using Augmentations.
     * 
     */
    private @Nullable String augmentations;
    /**
     * @return Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String beta1;
    /**
     * @return Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String beta2;
    /**
     * @return Whether to use distributed training.
     * 
     */
    private @Nullable String distributed;
    /**
     * @return Enable early stopping logic during training.
     * 
     */
    private @Nullable String earlyStopping;
    /**
     * @return Minimum number of epochs or validation evaluations to wait before primary metric improvement
     * is tracked for early stopping. Must be a positive integer.
     * 
     */
    private @Nullable String earlyStoppingDelay;
    /**
     * @return Minimum number of epochs or validation evaluations with no primary metric improvement before
     * the run is stopped. Must be a positive integer.
     * 
     */
    private @Nullable String earlyStoppingPatience;
    /**
     * @return Enable normalization when exporting ONNX model.
     * 
     */
    private @Nullable String enableOnnxNormalization;
    /**
     * @return Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
     * 
     */
    private @Nullable String evaluationFrequency;
    /**
     * @return Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
     * updating the model weights while accumulating the gradients of those steps, and then using
     * the accumulated gradients to compute the weight updates. Must be a positive integer.
     * 
     */
    private @Nullable String gradientAccumulationStep;
    /**
     * @return Number of layers to freeze for the model. Must be a positive integer.
     * For instance, passing 2 as the value for 'seresnext' means
     * freezing layer0 and layer1. For a full list of supported models and details on layer freeze, please
     * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    private @Nullable String layersToFreeze;
    /**
     * @return Initial learning rate. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String learningRate;
    /**
     * @return Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
     * 
     */
    private @Nullable String learningRateScheduler;
    /**
     * @return Name of the model to use for training.
     * For more information on the available models please visit the official documentation:
     * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    private @Nullable String modelName;
    /**
     * @return Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String momentum;
    /**
     * @return Enable nesterov when optimizer is 'sgd'.
     * 
     */
    private @Nullable String nesterov;
    /**
     * @return Number of training epochs. Must be a positive integer.
     * 
     */
    private @Nullable String numberOfEpochs;
    /**
     * @return Number of data loader workers. Must be a non-negative integer.
     * 
     */
    private @Nullable String numberOfWorkers;
    /**
     * @return Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
     * 
     */
    private @Nullable String optimizer;
    /**
     * @return Random seed to use when running deterministic training.
     * 
     */
    private @Nullable String randomSeed;
    /**
     * @return Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String stepLRGamma;
    /**
     * @return Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
     * 
     */
    private @Nullable String stepLRStepSize;
    /**
     * @return Training batch size. Must be a positive integer.
     * 
     */
    private @Nullable String trainingBatchSize;
    /**
     * @return Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
     * 
     */
    private @Nullable String trainingCropSize;
    /**
     * @return Validation batch size. Must be a positive integer.
     * 
     */
    private @Nullable String validationBatchSize;
    /**
     * @return Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
     * 
     */
    private @Nullable String validationCropSize;
    /**
     * @return Image size to which to resize before cropping for validation dataset. Must be a positive integer.
     * 
     */
    private @Nullable String validationResizeSize;
    /**
     * @return Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String warmupCosineLRCycles;
    /**
     * @return Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
     * 
     */
    private @Nullable String warmupCosineLRWarmupEpochs;
    /**
     * @return Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    private @Nullable String weightDecay;
    /**
     * @return Weighted loss. The accepted values are 0 for no weighted loss,
     * 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be 0, 1, or 2.
     * 
     */
    private @Nullable String weightedLoss;

    private ImageModelDistributionSettingsClassificationResponse() {}
    /**
     * @return Enable AMSGrad when optimizer is 'adam' or 'adamw'.
     * 
     */
    public Optional<String> amsGradient() {
        return Optional.ofNullable(this.amsGradient);
    }
    /**
     * @return Settings for using Augmentations.
     * 
     */
    public Optional<String> augmentations() {
        return Optional.ofNullable(this.augmentations);
    }
    /**
     * @return Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> beta1() {
        return Optional.ofNullable(this.beta1);
    }
    /**
     * @return Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> beta2() {
        return Optional.ofNullable(this.beta2);
    }
    /**
     * @return Whether to use distributed training.
     * 
     */
    public Optional<String> distributed() {
        return Optional.ofNullable(this.distributed);
    }
    /**
     * @return Enable early stopping logic during training.
     * 
     */
    public Optional<String> earlyStopping() {
        return Optional.ofNullable(this.earlyStopping);
    }
    /**
     * @return Minimum number of epochs or validation evaluations to wait before primary metric improvement
     * is tracked for early stopping. Must be a positive integer.
     * 
     */
    public Optional<String> earlyStoppingDelay() {
        return Optional.ofNullable(this.earlyStoppingDelay);
    }
    /**
     * @return Minimum number of epochs or validation evaluations with no primary metric improvement before
     * the run is stopped. Must be a positive integer.
     * 
     */
    public Optional<String> earlyStoppingPatience() {
        return Optional.ofNullable(this.earlyStoppingPatience);
    }
    /**
     * @return Enable normalization when exporting ONNX model.
     * 
     */
    public Optional<String> enableOnnxNormalization() {
        return Optional.ofNullable(this.enableOnnxNormalization);
    }
    /**
     * @return Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
     * 
     */
    public Optional<String> evaluationFrequency() {
        return Optional.ofNullable(this.evaluationFrequency);
    }
    /**
     * @return Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
     * updating the model weights while accumulating the gradients of those steps, and then using
     * the accumulated gradients to compute the weight updates. Must be a positive integer.
     * 
     */
    public Optional<String> gradientAccumulationStep() {
        return Optional.ofNullable(this.gradientAccumulationStep);
    }
    /**
     * @return Number of layers to freeze for the model. Must be a positive integer.
     * For instance, passing 2 as the value for 'seresnext' means
     * freezing layer0 and layer1. For a full list of supported models and details on layer freeze, please
     * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    public Optional<String> layersToFreeze() {
        return Optional.ofNullable(this.layersToFreeze);
    }
    /**
     * @return Initial learning rate. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> learningRate() {
        return Optional.ofNullable(this.learningRate);
    }
    /**
     * @return Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
     * 
     */
    public Optional<String> learningRateScheduler() {
        return Optional.ofNullable(this.learningRateScheduler);
    }
    /**
     * @return Name of the model to use for training.
     * For more information on the available models please visit the official documentation:
     * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     * 
     */
    public Optional<String> modelName() {
        return Optional.ofNullable(this.modelName);
    }
    /**
     * @return Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> momentum() {
        return Optional.ofNullable(this.momentum);
    }
    /**
     * @return Enable nesterov when optimizer is 'sgd'.
     * 
     */
    public Optional<String> nesterov() {
        return Optional.ofNullable(this.nesterov);
    }
    /**
     * @return Number of training epochs. Must be a positive integer.
     * 
     */
    public Optional<String> numberOfEpochs() {
        return Optional.ofNullable(this.numberOfEpochs);
    }
    /**
     * @return Number of data loader workers. Must be a non-negative integer.
     * 
     */
    public Optional<String> numberOfWorkers() {
        return Optional.ofNullable(this.numberOfWorkers);
    }
    /**
     * @return Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
     * 
     */
    public Optional<String> optimizer() {
        return Optional.ofNullable(this.optimizer);
    }
    /**
     * @return Random seed to use when running deterministic training.
     * 
     */
    public Optional<String> randomSeed() {
        return Optional.ofNullable(this.randomSeed);
    }
    /**
     * @return Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> stepLRGamma() {
        return Optional.ofNullable(this.stepLRGamma);
    }
    /**
     * @return Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
     * 
     */
    public Optional<String> stepLRStepSize() {
        return Optional.ofNullable(this.stepLRStepSize);
    }
    /**
     * @return Training batch size. Must be a positive integer.
     * 
     */
    public Optional<String> trainingBatchSize() {
        return Optional.ofNullable(this.trainingBatchSize);
    }
    /**
     * @return Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
     * 
     */
    public Optional<String> trainingCropSize() {
        return Optional.ofNullable(this.trainingCropSize);
    }
    /**
     * @return Validation batch size. Must be a positive integer.
     * 
     */
    public Optional<String> validationBatchSize() {
        return Optional.ofNullable(this.validationBatchSize);
    }
    /**
     * @return Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
     * 
     */
    public Optional<String> validationCropSize() {
        return Optional.ofNullable(this.validationCropSize);
    }
    /**
     * @return Image size to which to resize before cropping for validation dataset. Must be a positive integer.
     * 
     */
    public Optional<String> validationResizeSize() {
        return Optional.ofNullable(this.validationResizeSize);
    }
    /**
     * @return Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> warmupCosineLRCycles() {
        return Optional.ofNullable(this.warmupCosineLRCycles);
    }
    /**
     * @return Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
     * 
     */
    public Optional<String> warmupCosineLRWarmupEpochs() {
        return Optional.ofNullable(this.warmupCosineLRWarmupEpochs);
    }
    /**
     * @return Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
     * 
     */
    public Optional<String> weightDecay() {
        return Optional.ofNullable(this.weightDecay);
    }
    /**
     * @return Weighted loss. The accepted values are 0 for no weighted loss,
     * 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be 0, 1, or 2.
     * 
     */
    public Optional<String> weightedLoss() {
        return Optional.ofNullable(this.weightedLoss);
    }

    public static Builder builder() {
        return new Builder();
    }

    public static Builder builder(ImageModelDistributionSettingsClassificationResponse defaults) {
        return new Builder(defaults);
    }
    @CustomType.Builder
    public static final class Builder {
        private @Nullable String amsGradient;
        private @Nullable String augmentations;
        private @Nullable String beta1;
        private @Nullable String beta2;
        private @Nullable String distributed;
        private @Nullable String earlyStopping;
        private @Nullable String earlyStoppingDelay;
        private @Nullable String earlyStoppingPatience;
        private @Nullable String enableOnnxNormalization;
        private @Nullable String evaluationFrequency;
        private @Nullable String gradientAccumulationStep;
        private @Nullable String layersToFreeze;
        private @Nullable String learningRate;
        private @Nullable String learningRateScheduler;
        private @Nullable String modelName;
        private @Nullable String momentum;
        private @Nullable String nesterov;
        private @Nullable String numberOfEpochs;
        private @Nullable String numberOfWorkers;
        private @Nullable String optimizer;
        private @Nullable String randomSeed;
        private @Nullable String stepLRGamma;
        private @Nullable String stepLRStepSize;
        private @Nullable String trainingBatchSize;
        private @Nullable String trainingCropSize;
        private @Nullable String validationBatchSize;
        private @Nullable String validationCropSize;
        private @Nullable String validationResizeSize;
        private @Nullable String warmupCosineLRCycles;
        private @Nullable String warmupCosineLRWarmupEpochs;
        private @Nullable String weightDecay;
        private @Nullable String weightedLoss;
        public Builder() {}
        public Builder(ImageModelDistributionSettingsClassificationResponse defaults) {
            Objects.requireNonNull(defaults);
            this.amsGradient = defaults.amsGradient;
            this.augmentations = defaults.augmentations;
            this.beta1 = defaults.beta1;
            this.beta2 = defaults.beta2;
            this.distributed = defaults.distributed;
            this.earlyStopping = defaults.earlyStopping;
            this.earlyStoppingDelay = defaults.earlyStoppingDelay;
            this.earlyStoppingPatience = defaults.earlyStoppingPatience;
            this.enableOnnxNormalization = defaults.enableOnnxNormalization;
            this.evaluationFrequency = defaults.evaluationFrequency;
            this.gradientAccumulationStep = defaults.gradientAccumulationStep;
            this.layersToFreeze = defaults.layersToFreeze;
            this.learningRate = defaults.learningRate;
            this.learningRateScheduler = defaults.learningRateScheduler;
            this.modelName = defaults.modelName;
            this.momentum = defaults.momentum;
            this.nesterov = defaults.nesterov;
            this.numberOfEpochs = defaults.numberOfEpochs;
            this.numberOfWorkers = defaults.numberOfWorkers;
            this.optimizer = defaults.optimizer;
            this.randomSeed = defaults.randomSeed;
            this.stepLRGamma = defaults.stepLRGamma;
            this.stepLRStepSize = defaults.stepLRStepSize;
            this.trainingBatchSize = defaults.trainingBatchSize;
            this.trainingCropSize = defaults.trainingCropSize;
            this.validationBatchSize = defaults.validationBatchSize;
            this.validationCropSize = defaults.validationCropSize;
            this.validationResizeSize = defaults.validationResizeSize;
            this.warmupCosineLRCycles = defaults.warmupCosineLRCycles;
            this.warmupCosineLRWarmupEpochs = defaults.warmupCosineLRWarmupEpochs;
            this.weightDecay = defaults.weightDecay;
            this.weightedLoss = defaults.weightedLoss;
        }

        @CustomType.Setter
        public Builder amsGradient(@Nullable String amsGradient) {

            this.amsGradient = amsGradient;
            return this;
        }
        @CustomType.Setter
        public Builder augmentations(@Nullable String augmentations) {

            this.augmentations = augmentations;
            return this;
        }
        @CustomType.Setter
        public Builder beta1(@Nullable String beta1) {

            this.beta1 = beta1;
            return this;
        }
        @CustomType.Setter
        public Builder beta2(@Nullable String beta2) {

            this.beta2 = beta2;
            return this;
        }
        @CustomType.Setter
        public Builder distributed(@Nullable String distributed) {

            this.distributed = distributed;
            return this;
        }
        @CustomType.Setter
        public Builder earlyStopping(@Nullable String earlyStopping) {

            this.earlyStopping = earlyStopping;
            return this;
        }
        @CustomType.Setter
        public Builder earlyStoppingDelay(@Nullable String earlyStoppingDelay) {

            this.earlyStoppingDelay = earlyStoppingDelay;
            return this;
        }
        @CustomType.Setter
        public Builder earlyStoppingPatience(@Nullable String earlyStoppingPatience) {

            this.earlyStoppingPatience = earlyStoppingPatience;
            return this;
        }
        @CustomType.Setter
        public Builder enableOnnxNormalization(@Nullable String enableOnnxNormalization) {

            this.enableOnnxNormalization = enableOnnxNormalization;
            return this;
        }
        @CustomType.Setter
        public Builder evaluationFrequency(@Nullable String evaluationFrequency) {

            this.evaluationFrequency = evaluationFrequency;
            return this;
        }
        @CustomType.Setter
        public Builder gradientAccumulationStep(@Nullable String gradientAccumulationStep) {

            this.gradientAccumulationStep = gradientAccumulationStep;
            return this;
        }
        @CustomType.Setter
        public Builder layersToFreeze(@Nullable String layersToFreeze) {

            this.layersToFreeze = layersToFreeze;
            return this;
        }
        @CustomType.Setter
        public Builder learningRate(@Nullable String learningRate) {

            this.learningRate = learningRate;
            return this;
        }
        @CustomType.Setter
        public Builder learningRateScheduler(@Nullable String learningRateScheduler) {

            this.learningRateScheduler = learningRateScheduler;
            return this;
        }
        @CustomType.Setter
        public Builder modelName(@Nullable String modelName) {

            this.modelName = modelName;
            return this;
        }
        @CustomType.Setter
        public Builder momentum(@Nullable String momentum) {

            this.momentum = momentum;
            return this;
        }
        @CustomType.Setter
        public Builder nesterov(@Nullable String nesterov) {

            this.nesterov = nesterov;
            return this;
        }
        @CustomType.Setter
        public Builder numberOfEpochs(@Nullable String numberOfEpochs) {

            this.numberOfEpochs = numberOfEpochs;
            return this;
        }
        @CustomType.Setter
        public Builder numberOfWorkers(@Nullable String numberOfWorkers) {

            this.numberOfWorkers = numberOfWorkers;
            return this;
        }
        @CustomType.Setter
        public Builder optimizer(@Nullable String optimizer) {

            this.optimizer = optimizer;
            return this;
        }
        @CustomType.Setter
        public Builder randomSeed(@Nullable String randomSeed) {

            this.randomSeed = randomSeed;
            return this;
        }
        @CustomType.Setter
        public Builder stepLRGamma(@Nullable String stepLRGamma) {

            this.stepLRGamma = stepLRGamma;
            return this;
        }
        @CustomType.Setter
        public Builder stepLRStepSize(@Nullable String stepLRStepSize) {

            this.stepLRStepSize = stepLRStepSize;
            return this;
        }
        @CustomType.Setter
        public Builder trainingBatchSize(@Nullable String trainingBatchSize) {

            this.trainingBatchSize = trainingBatchSize;
            return this;
        }
        @CustomType.Setter
        public Builder trainingCropSize(@Nullable String trainingCropSize) {

            this.trainingCropSize = trainingCropSize;
            return this;
        }
        @CustomType.Setter
        public Builder validationBatchSize(@Nullable String validationBatchSize) {

            this.validationBatchSize = validationBatchSize;
            return this;
        }
        @CustomType.Setter
        public Builder validationCropSize(@Nullable String validationCropSize) {

            this.validationCropSize = validationCropSize;
            return this;
        }
        @CustomType.Setter
        public Builder validationResizeSize(@Nullable String validationResizeSize) {

            this.validationResizeSize = validationResizeSize;
            return this;
        }
        @CustomType.Setter
        public Builder warmupCosineLRCycles(@Nullable String warmupCosineLRCycles) {

            this.warmupCosineLRCycles = warmupCosineLRCycles;
            return this;
        }
        @CustomType.Setter
        public Builder warmupCosineLRWarmupEpochs(@Nullable String warmupCosineLRWarmupEpochs) {

            this.warmupCosineLRWarmupEpochs = warmupCosineLRWarmupEpochs;
            return this;
        }
        @CustomType.Setter
        public Builder weightDecay(@Nullable String weightDecay) {

            this.weightDecay = weightDecay;
            return this;
        }
        @CustomType.Setter
        public Builder weightedLoss(@Nullable String weightedLoss) {

            this.weightedLoss = weightedLoss;
            return this;
        }
        public ImageModelDistributionSettingsClassificationResponse build() {
            final var _resultValue = new ImageModelDistributionSettingsClassificationResponse();
            _resultValue.amsGradient = amsGradient;
            _resultValue.augmentations = augmentations;
            _resultValue.beta1 = beta1;
            _resultValue.beta2 = beta2;
            _resultValue.distributed = distributed;
            _resultValue.earlyStopping = earlyStopping;
            _resultValue.earlyStoppingDelay = earlyStoppingDelay;
            _resultValue.earlyStoppingPatience = earlyStoppingPatience;
            _resultValue.enableOnnxNormalization = enableOnnxNormalization;
            _resultValue.evaluationFrequency = evaluationFrequency;
            _resultValue.gradientAccumulationStep = gradientAccumulationStep;
            _resultValue.layersToFreeze = layersToFreeze;
            _resultValue.learningRate = learningRate;
            _resultValue.learningRateScheduler = learningRateScheduler;
            _resultValue.modelName = modelName;
            _resultValue.momentum = momentum;
            _resultValue.nesterov = nesterov;
            _resultValue.numberOfEpochs = numberOfEpochs;
            _resultValue.numberOfWorkers = numberOfWorkers;
            _resultValue.optimizer = optimizer;
            _resultValue.randomSeed = randomSeed;
            _resultValue.stepLRGamma = stepLRGamma;
            _resultValue.stepLRStepSize = stepLRStepSize;
            _resultValue.trainingBatchSize = trainingBatchSize;
            _resultValue.trainingCropSize = trainingCropSize;
            _resultValue.validationBatchSize = validationBatchSize;
            _resultValue.validationCropSize = validationCropSize;
            _resultValue.validationResizeSize = validationResizeSize;
            _resultValue.warmupCosineLRCycles = warmupCosineLRCycles;
            _resultValue.warmupCosineLRWarmupEpochs = warmupCosineLRWarmupEpochs;
            _resultValue.weightDecay = weightDecay;
            _resultValue.weightedLoss = weightedLoss;
            return _resultValue;
        }
    }
}
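
// Minimal usage sketch (illustrative only, not part of the generated type): the Builder assembles
// an instance and the Optional getters read individual settings back; the string values below are
// example distribution expressions, not values required by the API.
//
//     ImageModelDistributionSettingsClassificationResponse settings =
//         ImageModelDistributionSettingsClassificationResponse.builder()
//             .optimizer("choice('adam', 'adamw')")
//             .learningRate("uniform(0.001, 0.01)")
//             .numberOfEpochs("choice(15, 30)")
//             .build();
//     String optimizer = settings.optimizer().orElse("sgd");
//     String learningRate = settings.learningRate().orElse("0.001");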



