
// com.pulumi.azurenative.machinelearningservices.kotlin.inputs.ImageModelDistributionSettingsClassificationArgs.kt Maven / Gradle / Ivy
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.azurenative.machinelearningservices.kotlin.inputs
import com.pulumi.azurenative.machinelearningservices.inputs.ImageModelDistributionSettingsClassificationArgs.builder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.String
import kotlin.Suppress
import kotlin.jvm.JvmName
/**
 * Distribution expressions to sweep over values of model settings.
 *
 * Some examples are:
 * ```
 * ModelName = "choice('seresnext', 'resnest50')";
 * LearningRate = "uniform(0.001, 0.01)";
 * LayersToFreeze = "choice(0, 2)";
 * ```
 * All values are expressed as distribution-expression strings; for details on how to compose
 * distribution expressions please check the documentation:
 * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
 * For more information on the available settings please visit the official documentation:
 * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
 * @property amsGradient Enable AMSGrad when optimizer is 'adam' or 'adamw'.
 * @property augmentations Settings for using Augmentations.
 * @property beta1 Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
 * @property beta2 Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
 * @property distributed Whether to use distributed training.
 * @property earlyStopping Enable early stopping logic during training.
 * @property earlyStoppingDelay Minimum number of epochs or validation evaluations to wait before primary metric improvement
 * is tracked for early stopping. Must be a positive integer.
 * @property earlyStoppingPatience Minimum number of epochs or validation evaluations with no primary metric improvement before
 * the run is stopped. Must be a positive integer.
 * @property enableOnnxNormalization Enable normalization when exporting ONNX model.
 * @property evaluationFrequency Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
 * @property gradientAccumulationStep Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
 * updating the model weights while accumulating the gradients of those steps, and then using
 * the accumulated gradients to compute the weight updates. Must be a positive integer.
 * @property layersToFreeze Number of layers to freeze for the model. Must be a positive integer.
 * For instance, passing 2 as value for 'seresnext' means
 * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
 * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
 * @property learningRate Initial learning rate. Must be a float in the range [0, 1].
 * @property learningRateScheduler Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
 * @property modelName Name of the model to use for training.
 * For more information on the available models please visit the official documentation:
 * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
 * @property momentum Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
 * @property nesterov Enable nesterov when optimizer is 'sgd'.
 * @property numberOfEpochs Number of training epochs. Must be a positive integer.
 * @property numberOfWorkers Number of data loader workers. Must be a non-negative integer.
 * @property optimizer Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
 * @property randomSeed Random seed to be used when using deterministic training.
 * @property stepLRGamma Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
 * @property stepLRStepSize Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
 * @property trainingBatchSize Training batch size. Must be a positive integer.
 * @property trainingCropSize Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
 * @property validationBatchSize Validation batch size. Must be a positive integer.
 * @property validationCropSize Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
 * @property validationResizeSize Image size to which to resize before cropping for validation dataset. Must be a positive integer.
 * @property warmupCosineLRCycles Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
 * @property warmupCosineLRWarmupEpochs Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
 * @property weightDecay Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
 * @property weightedLoss Weighted loss. The accepted values are 0 for no weighted loss.
 * 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be 0 or 1 or 2.
 */
public data class ImageModelDistributionSettingsClassificationArgs(
    public val amsGradient: Output<String>? = null,
    public val augmentations: Output<String>? = null,
    public val beta1: Output<String>? = null,
    public val beta2: Output<String>? = null,
    public val distributed: Output<String>? = null,
    public val earlyStopping: Output<String>? = null,
    public val earlyStoppingDelay: Output<String>? = null,
    public val earlyStoppingPatience: Output<String>? = null,
    public val enableOnnxNormalization: Output<String>? = null,
    public val evaluationFrequency: Output<String>? = null,
    public val gradientAccumulationStep: Output<String>? = null,
    public val layersToFreeze: Output<String>? = null,
    public val learningRate: Output<String>? = null,
    public val learningRateScheduler: Output<String>? = null,
    public val modelName: Output<String>? = null,
    public val momentum: Output<String>? = null,
    public val nesterov: Output<String>? = null,
    public val numberOfEpochs: Output<String>? = null,
    public val numberOfWorkers: Output<String>? = null,
    public val optimizer: Output<String>? = null,
    public val randomSeed: Output<String>? = null,
    public val stepLRGamma: Output<String>? = null,
    public val stepLRStepSize: Output<String>? = null,
    public val trainingBatchSize: Output<String>? = null,
    public val trainingCropSize: Output<String>? = null,
    public val validationBatchSize: Output<String>? = null,
    public val validationCropSize: Output<String>? = null,
    public val validationResizeSize: Output<String>? = null,
    public val warmupCosineLRCycles: Output<String>? = null,
    public val warmupCosineLRWarmupEpochs: Output<String>? = null,
    public val weightDecay: Output<String>? = null,
    public val weightedLoss: Output<String>? = null,
) :
    ConvertibleToJava<com.pulumi.azurenative.machinelearningservices.inputs.ImageModelDistributionSettingsClassificationArgs> {
    /** Converts this Kotlin wrapper into the underlying Java SDK args type. */
    override fun toJava(): com.pulumi.azurenative.machinelearningservices.inputs.ImageModelDistributionSettingsClassificationArgs =
        // `builder` is statically imported from the Java args type; each field is
        // forwarded unchanged (the identity `applyValue` keeps the Output wrapper).
        builder()
            .amsGradient(amsGradient?.applyValue { args0 -> args0 })
            .augmentations(augmentations?.applyValue { args0 -> args0 })
            .beta1(beta1?.applyValue { args0 -> args0 })
            .beta2(beta2?.applyValue { args0 -> args0 })
            .distributed(distributed?.applyValue { args0 -> args0 })
            .earlyStopping(earlyStopping?.applyValue { args0 -> args0 })
            .earlyStoppingDelay(earlyStoppingDelay?.applyValue { args0 -> args0 })
            .earlyStoppingPatience(earlyStoppingPatience?.applyValue { args0 -> args0 })
            .enableOnnxNormalization(enableOnnxNormalization?.applyValue { args0 -> args0 })
            .evaluationFrequency(evaluationFrequency?.applyValue { args0 -> args0 })
            .gradientAccumulationStep(gradientAccumulationStep?.applyValue { args0 -> args0 })
            .layersToFreeze(layersToFreeze?.applyValue { args0 -> args0 })
            .learningRate(learningRate?.applyValue { args0 -> args0 })
            .learningRateScheduler(learningRateScheduler?.applyValue { args0 -> args0 })
            .modelName(modelName?.applyValue { args0 -> args0 })
            .momentum(momentum?.applyValue { args0 -> args0 })
            .nesterov(nesterov?.applyValue { args0 -> args0 })
            .numberOfEpochs(numberOfEpochs?.applyValue { args0 -> args0 })
            .numberOfWorkers(numberOfWorkers?.applyValue { args0 -> args0 })
            .optimizer(optimizer?.applyValue { args0 -> args0 })
            .randomSeed(randomSeed?.applyValue { args0 -> args0 })
            .stepLRGamma(stepLRGamma?.applyValue { args0 -> args0 })
            .stepLRStepSize(stepLRStepSize?.applyValue { args0 -> args0 })
            .trainingBatchSize(trainingBatchSize?.applyValue { args0 -> args0 })
            .trainingCropSize(trainingCropSize?.applyValue { args0 -> args0 })
            .validationBatchSize(validationBatchSize?.applyValue { args0 -> args0 })
            .validationCropSize(validationCropSize?.applyValue { args0 -> args0 })
            .validationResizeSize(validationResizeSize?.applyValue { args0 -> args0 })
            .warmupCosineLRCycles(warmupCosineLRCycles?.applyValue { args0 -> args0 })
            .warmupCosineLRWarmupEpochs(warmupCosineLRWarmupEpochs?.applyValue { args0 -> args0 })
            .weightDecay(weightDecay?.applyValue { args0 -> args0 })
            .weightedLoss(weightedLoss?.applyValue { args0 -> args0 })
            .build()
}
/**
 * Builder for [ImageModelDistributionSettingsClassificationArgs].
 *
 * Each setting has two setter overloads: one accepting an already-wrapped
 * [Output]&lt;String&gt; and one accepting a plain nullable [String] that is lifted
 * into an [Output] via [Output.of]. The `@JvmName` values are generated,
 * stable disambiguators for the JVM and must not be changed.
 */
@PulumiTagMarker
public class ImageModelDistributionSettingsClassificationArgsBuilder internal constructor() {
    private var amsGradient: Output<String>? = null

    private var augmentations: Output<String>? = null

    private var beta1: Output<String>? = null

    private var beta2: Output<String>? = null

    private var distributed: Output<String>? = null

    private var earlyStopping: Output<String>? = null

    private var earlyStoppingDelay: Output<String>? = null

    private var earlyStoppingPatience: Output<String>? = null

    private var enableOnnxNormalization: Output<String>? = null

    private var evaluationFrequency: Output<String>? = null

    private var gradientAccumulationStep: Output<String>? = null

    private var layersToFreeze: Output<String>? = null

    private var learningRate: Output<String>? = null

    private var learningRateScheduler: Output<String>? = null

    private var modelName: Output<String>? = null

    private var momentum: Output<String>? = null

    private var nesterov: Output<String>? = null

    private var numberOfEpochs: Output<String>? = null

    private var numberOfWorkers: Output<String>? = null

    private var optimizer: Output<String>? = null

    private var randomSeed: Output<String>? = null

    private var stepLRGamma: Output<String>? = null

    private var stepLRStepSize: Output<String>? = null

    private var trainingBatchSize: Output<String>? = null

    private var trainingCropSize: Output<String>? = null

    private var validationBatchSize: Output<String>? = null

    private var validationCropSize: Output<String>? = null

    private var validationResizeSize: Output<String>? = null

    private var warmupCosineLRCycles: Output<String>? = null

    private var warmupCosineLRWarmupEpochs: Output<String>? = null

    private var weightDecay: Output<String>? = null

    private var weightedLoss: Output<String>? = null

    /**
     * @param value Enable AMSGrad when optimizer is 'adam' or 'adamw'.
     */
    @JvmName("pmmrmojoufftidvm")
    public suspend fun amsGradient(`value`: Output<String>) {
        this.amsGradient = value
    }

    /**
     * @param value Settings for using Augmentations.
     */
    @JvmName("ylsbwnayemsbgajc")
    public suspend fun augmentations(`value`: Output<String>) {
        this.augmentations = value
    }

    /**
     * @param value Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     */
    @JvmName("goxskwoukvxsrsgv")
    public suspend fun beta1(`value`: Output<String>) {
        this.beta1 = value
    }

    /**
     * @param value Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     */
    @JvmName("dtqlwuqrxdseafru")
    public suspend fun beta2(`value`: Output<String>) {
        this.beta2 = value
    }

    /**
     * @param value Whether to use distributed training.
     */
    @JvmName("qvwvjcckawcykgei")
    public suspend fun distributed(`value`: Output<String>) {
        this.distributed = value
    }

    /**
     * @param value Enable early stopping logic during training.
     */
    @JvmName("hnultfxvoqhxrxmj")
    public suspend fun earlyStopping(`value`: Output<String>) {
        this.earlyStopping = value
    }

    /**
     * @param value Minimum number of epochs or validation evaluations to wait before primary metric improvement
     * is tracked for early stopping. Must be a positive integer.
     */
    @JvmName("flwjcqksqghbsojy")
    public suspend fun earlyStoppingDelay(`value`: Output<String>) {
        this.earlyStoppingDelay = value
    }

    /**
     * @param value Minimum number of epochs or validation evaluations with no primary metric improvement before
     * the run is stopped. Must be a positive integer.
     */
    @JvmName("bpuncqdnrwfpijjs")
    public suspend fun earlyStoppingPatience(`value`: Output<String>) {
        this.earlyStoppingPatience = value
    }

    /**
     * @param value Enable normalization when exporting ONNX model.
     */
    @JvmName("fhdabrtcwbiyxujt")
    public suspend fun enableOnnxNormalization(`value`: Output<String>) {
        this.enableOnnxNormalization = value
    }

    /**
     * @param value Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
     */
    @JvmName("lfgvxvtjgaflqtdq")
    public suspend fun evaluationFrequency(`value`: Output<String>) {
        this.evaluationFrequency = value
    }

    /**
     * @param value Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
     * updating the model weights while accumulating the gradients of those steps, and then using
     * the accumulated gradients to compute the weight updates. Must be a positive integer.
     */
    @JvmName("kwqccoxlpvqngkbk")
    public suspend fun gradientAccumulationStep(`value`: Output<String>) {
        this.gradientAccumulationStep = value
    }

    /**
     * @param value Number of layers to freeze for the model. Must be a positive integer.
     * For instance, passing 2 as value for 'seresnext' means
     * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
     * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     */
    @JvmName("ctraxliipmpcleew")
    public suspend fun layersToFreeze(`value`: Output<String>) {
        this.layersToFreeze = value
    }

    /**
     * @param value Initial learning rate. Must be a float in the range [0, 1].
     */
    @JvmName("rxtubcmollnxoaxl")
    public suspend fun learningRate(`value`: Output<String>) {
        this.learningRate = value
    }

    /**
     * @param value Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
     */
    @JvmName("okmpiihtjjdgdvhs")
    public suspend fun learningRateScheduler(`value`: Output<String>) {
        this.learningRateScheduler = value
    }

    /**
     * @param value Name of the model to use for training.
     * For more information on the available models please visit the official documentation:
     * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     */
    @JvmName("nejbkrqthwxvccrx")
    public suspend fun modelName(`value`: Output<String>) {
        this.modelName = value
    }

    /**
     * @param value Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
     */
    @JvmName("piagnqfuxihfqibj")
    public suspend fun momentum(`value`: Output<String>) {
        this.momentum = value
    }

    /**
     * @param value Enable nesterov when optimizer is 'sgd'.
     */
    @JvmName("cahexsujxohiognw")
    public suspend fun nesterov(`value`: Output<String>) {
        this.nesterov = value
    }

    /**
     * @param value Number of training epochs. Must be a positive integer.
     */
    @JvmName("yooxoheopmssdruv")
    public suspend fun numberOfEpochs(`value`: Output<String>) {
        this.numberOfEpochs = value
    }

    /**
     * @param value Number of data loader workers. Must be a non-negative integer.
     */
    @JvmName("wefkldrurbnxagro")
    public suspend fun numberOfWorkers(`value`: Output<String>) {
        this.numberOfWorkers = value
    }

    /**
     * @param value Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
     */
    @JvmName("mueuaovawpxnixup")
    public suspend fun optimizer(`value`: Output<String>) {
        this.optimizer = value
    }

    /**
     * @param value Random seed to be used when using deterministic training.
     */
    @JvmName("wuqcjgdkxtsdtulf")
    public suspend fun randomSeed(`value`: Output<String>) {
        this.randomSeed = value
    }

    /**
     * @param value Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
     */
    @JvmName("imspogafymbmqles")
    public suspend fun stepLRGamma(`value`: Output<String>) {
        this.stepLRGamma = value
    }

    /**
     * @param value Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
     */
    @JvmName("svcnanjwheixshar")
    public suspend fun stepLRStepSize(`value`: Output<String>) {
        this.stepLRStepSize = value
    }

    /**
     * @param value Training batch size. Must be a positive integer.
     */
    @JvmName("fyiosdjkqxbhdvsp")
    public suspend fun trainingBatchSize(`value`: Output<String>) {
        this.trainingBatchSize = value
    }

    /**
     * @param value Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
     */
    @JvmName("yyekmopbyvxxmuof")
    public suspend fun trainingCropSize(`value`: Output<String>) {
        this.trainingCropSize = value
    }

    /**
     * @param value Validation batch size. Must be a positive integer.
     */
    @JvmName("ttuaxgtyftngpfpd")
    public suspend fun validationBatchSize(`value`: Output<String>) {
        this.validationBatchSize = value
    }

    /**
     * @param value Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
     */
    @JvmName("shvjcefwnhttuxvb")
    public suspend fun validationCropSize(`value`: Output<String>) {
        this.validationCropSize = value
    }

    /**
     * @param value Image size to which to resize before cropping for validation dataset. Must be a positive integer.
     */
    @JvmName("hxwppvdmkwtbtrde")
    public suspend fun validationResizeSize(`value`: Output<String>) {
        this.validationResizeSize = value
    }

    /**
     * @param value Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
     */
    @JvmName("bqrbnsawaovdbxcu")
    public suspend fun warmupCosineLRCycles(`value`: Output<String>) {
        this.warmupCosineLRCycles = value
    }

    /**
     * @param value Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
     */
    @JvmName("hqsvjssqstgestar")
    public suspend fun warmupCosineLRWarmupEpochs(`value`: Output<String>) {
        this.warmupCosineLRWarmupEpochs = value
    }

    /**
     * @param value Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
     */
    @JvmName("dirmhgkdjbwxgslr")
    public suspend fun weightDecay(`value`: Output<String>) {
        this.weightDecay = value
    }

    /**
     * @param value Weighted loss. The accepted values are 0 for no weighted loss.
     * 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be 0 or 1 or 2.
     */
    @JvmName("nshhubgecwmtubbj")
    public suspend fun weightedLoss(`value`: Output<String>) {
        this.weightedLoss = value
    }

    /**
     * @param value Enable AMSGrad when optimizer is 'adam' or 'adamw'.
     */
    @JvmName("qsgivaynghaxggbo")
    public suspend fun amsGradient(`value`: String?) {
        this.amsGradient = value?.let { of(it) }
    }

    /**
     * @param value Settings for using Augmentations.
     */
    @JvmName("buyrafkhgubrblos")
    public suspend fun augmentations(`value`: String?) {
        this.augmentations = value?.let { of(it) }
    }

    /**
     * @param value Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     */
    @JvmName("dyomkwliqwmkpdxk")
    public suspend fun beta1(`value`: String?) {
        this.beta1 = value?.let { of(it) }
    }

    /**
     * @param value Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1].
     */
    @JvmName("grvbwskrschoxidu")
    public suspend fun beta2(`value`: String?) {
        this.beta2 = value?.let { of(it) }
    }

    /**
     * @param value Whether to use distributed training.
     */
    @JvmName("bxhoyuqjyiqbgjep")
    public suspend fun distributed(`value`: String?) {
        this.distributed = value?.let { of(it) }
    }

    /**
     * @param value Enable early stopping logic during training.
     */
    @JvmName("fwegytyesiqvggid")
    public suspend fun earlyStopping(`value`: String?) {
        this.earlyStopping = value?.let { of(it) }
    }

    /**
     * @param value Minimum number of epochs or validation evaluations to wait before primary metric improvement
     * is tracked for early stopping. Must be a positive integer.
     */
    @JvmName("wklarmkengympblt")
    public suspend fun earlyStoppingDelay(`value`: String?) {
        this.earlyStoppingDelay = value?.let { of(it) }
    }

    /**
     * @param value Minimum number of epochs or validation evaluations with no primary metric improvement before
     * the run is stopped. Must be a positive integer.
     */
    @JvmName("pvuapffyjtbcrain")
    public suspend fun earlyStoppingPatience(`value`: String?) {
        this.earlyStoppingPatience = value?.let { of(it) }
    }

    /**
     * @param value Enable normalization when exporting ONNX model.
     */
    @JvmName("yxbvpgdhbbfynvcx")
    public suspend fun enableOnnxNormalization(`value`: String?) {
        this.enableOnnxNormalization = value?.let { of(it) }
    }

    /**
     * @param value Frequency to evaluate validation dataset to get metric scores. Must be a positive integer.
     */
    @JvmName("oqhipwthxwnrntxf")
    public suspend fun evaluationFrequency(`value`: String?) {
        this.evaluationFrequency = value?.let { of(it) }
    }

    /**
     * @param value Gradient accumulation means running a configured number of "GradAccumulationStep" steps without
     * updating the model weights while accumulating the gradients of those steps, and then using
     * the accumulated gradients to compute the weight updates. Must be a positive integer.
     */
    @JvmName("npesyoyimajeyxai")
    public suspend fun gradientAccumulationStep(`value`: String?) {
        this.gradientAccumulationStep = value?.let { of(it) }
    }

    /**
     * @param value Number of layers to freeze for the model. Must be a positive integer.
     * For instance, passing 2 as value for 'seresnext' means
     * freezing layer0 and layer1. For a full list of models supported and details on layer freeze, please
     * see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     */
    @JvmName("oioueejudhucyppw")
    public suspend fun layersToFreeze(`value`: String?) {
        this.layersToFreeze = value?.let { of(it) }
    }

    /**
     * @param value Initial learning rate. Must be a float in the range [0, 1].
     */
    @JvmName("lpsoxllllcicogtx")
    public suspend fun learningRate(`value`: String?) {
        this.learningRate = value?.let { of(it) }
    }

    /**
     * @param value Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'.
     */
    @JvmName("obssgtskyhpjqkws")
    public suspend fun learningRateScheduler(`value`: String?) {
        this.learningRateScheduler = value?.let { of(it) }
    }

    /**
     * @param value Name of the model to use for training.
     * For more information on the available models please visit the official documentation:
     * https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
     */
    @JvmName("clkhfclmnfffggxv")
    public suspend fun modelName(`value`: String?) {
        this.modelName = value?.let { of(it) }
    }

    /**
     * @param value Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
     */
    @JvmName("wfimhtkilvjwpndv")
    public suspend fun momentum(`value`: String?) {
        this.momentum = value?.let { of(it) }
    }

    /**
     * @param value Enable nesterov when optimizer is 'sgd'.
     */
    @JvmName("sisoiuutyfcfkgeo")
    public suspend fun nesterov(`value`: String?) {
        this.nesterov = value?.let { of(it) }
    }

    /**
     * @param value Number of training epochs. Must be a positive integer.
     */
    @JvmName("ddajjjvpnbhnschw")
    public suspend fun numberOfEpochs(`value`: String?) {
        this.numberOfEpochs = value?.let { of(it) }
    }

    /**
     * @param value Number of data loader workers. Must be a non-negative integer.
     */
    @JvmName("bfeqcwxfkqghghwj")
    public suspend fun numberOfWorkers(`value`: String?) {
        this.numberOfWorkers = value?.let { of(it) }
    }

    /**
     * @param value Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
     */
    @JvmName("yjgelxrnhgjrvuta")
    public suspend fun optimizer(`value`: String?) {
        this.optimizer = value?.let { of(it) }
    }

    /**
     * @param value Random seed to be used when using deterministic training.
     */
    @JvmName("pcocjpgupgdknivw")
    public suspend fun randomSeed(`value`: String?) {
        this.randomSeed = value?.let { of(it) }
    }

    /**
     * @param value Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1].
     */
    @JvmName("iadxkllfowtlcahg")
    public suspend fun stepLRGamma(`value`: String?) {
        this.stepLRGamma = value?.let { of(it) }
    }

    /**
     * @param value Value of step size when learning rate scheduler is 'step'. Must be a positive integer.
     */
    @JvmName("dmjmutkneqbjlxag")
    public suspend fun stepLRStepSize(`value`: String?) {
        this.stepLRStepSize = value?.let { of(it) }
    }

    /**
     * @param value Training batch size. Must be a positive integer.
     */
    @JvmName("qaeowputsfoyfmda")
    public suspend fun trainingBatchSize(`value`: String?) {
        this.trainingBatchSize = value?.let { of(it) }
    }

    /**
     * @param value Image crop size that is input to the neural network for the training dataset. Must be a positive integer.
     */
    @JvmName("usxorbnwrppkkgst")
    public suspend fun trainingCropSize(`value`: String?) {
        this.trainingCropSize = value?.let { of(it) }
    }

    /**
     * @param value Validation batch size. Must be a positive integer.
     */
    @JvmName("dqfpmjtkoxyfvbly")
    public suspend fun validationBatchSize(`value`: String?) {
        this.validationBatchSize = value?.let { of(it) }
    }

    /**
     * @param value Image crop size that is input to the neural network for the validation dataset. Must be a positive integer.
     */
    @JvmName("gywqhkratresufwk")
    public suspend fun validationCropSize(`value`: String?) {
        this.validationCropSize = value?.let { of(it) }
    }

    /**
     * @param value Image size to which to resize before cropping for validation dataset. Must be a positive integer.
     */
    @JvmName("davpjkvlkicyebgd")
    public suspend fun validationResizeSize(`value`: String?) {
        this.validationResizeSize = value?.let { of(it) }
    }

    /**
     * @param value Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1].
     */
    @JvmName("lhxldffwdgymbmby")
    public suspend fun warmupCosineLRCycles(`value`: String?) {
        this.warmupCosineLRCycles = value?.let { of(it) }
    }

    /**
     * @param value Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer.
     */
    @JvmName("bufypfdiewcwrapn")
    public suspend fun warmupCosineLRWarmupEpochs(`value`: String?) {
        this.warmupCosineLRWarmupEpochs = value?.let { of(it) }
    }

    /**
     * @param value Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range [0, 1].
     */
    @JvmName("gwemjsdydeiydowt")
    public suspend fun weightDecay(`value`: String?) {
        this.weightDecay = value?.let { of(it) }
    }

    /**
     * @param value Weighted loss. The accepted values are 0 for no weighted loss.
     * 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be 0 or 1 or 2.
     */
    @JvmName("mjrboldemkbedayw")
    public suspend fun weightedLoss(`value`: String?) {
        this.weightedLoss = value?.let { of(it) }
    }

    /** Assembles the accumulated settings into an immutable args instance. */
    internal fun build(): ImageModelDistributionSettingsClassificationArgs =
        ImageModelDistributionSettingsClassificationArgs(
            amsGradient = amsGradient,
            augmentations = augmentations,
            beta1 = beta1,
            beta2 = beta2,
            distributed = distributed,
            earlyStopping = earlyStopping,
            earlyStoppingDelay = earlyStoppingDelay,
            earlyStoppingPatience = earlyStoppingPatience,
            enableOnnxNormalization = enableOnnxNormalization,
            evaluationFrequency = evaluationFrequency,
            gradientAccumulationStep = gradientAccumulationStep,
            layersToFreeze = layersToFreeze,
            learningRate = learningRate,
            learningRateScheduler = learningRateScheduler,
            modelName = modelName,
            momentum = momentum,
            nesterov = nesterov,
            numberOfEpochs = numberOfEpochs,
            numberOfWorkers = numberOfWorkers,
            optimizer = optimizer,
            randomSeed = randomSeed,
            stepLRGamma = stepLRGamma,
            stepLRStepSize = stepLRStepSize,
            trainingBatchSize = trainingBatchSize,
            trainingCropSize = trainingCropSize,
            validationBatchSize = validationBatchSize,
            validationCropSize = validationCropSize,
            validationResizeSize = validationResizeSize,
            warmupCosineLRCycles = warmupCosineLRCycles,
            warmupCosineLRWarmupEpochs = warmupCosineLRWarmupEpochs,
            weightDecay = weightDecay,
            weightedLoss = weightedLoss,
        )
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy