
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.neptunedata.model
import aws.smithy.kotlin.runtime.SdkDsl
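/**
 * Input to start a Neptune ML model training job. Every field is optional; any field left
 * unset falls back to the service-side default documented on the corresponding property.
 */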
public class StartMlModelTrainingJobRequest private constructor(builder: Builder) {
/**
* The type of ML instance used in preparing and managing training of ML models. This is a CPU instance chosen based on memory requirements for processing the training data and model.
*/
public val baseProcessingInstanceType: kotlin.String? = builder.baseProcessingInstanceType
/**
* The configuration for custom model training. This is a JSON object.
*/
public val customModelTrainingParameters: aws.sdk.kotlin.services.neptunedata.model.CustomModelTrainingParameters? = builder.customModelTrainingParameters
/**
* The job ID of the completed data-processing job that has created the data that the training will work with.
*/
public val dataProcessingJobId: kotlin.String? = builder.dataProcessingJobId
/**
* Optimizes the cost of training machine-learning models by using Amazon Elastic Compute Cloud spot instances. The default is `False`.
*/
public val enableManagedSpotTraining: kotlin.Boolean? = builder.enableManagedSpotTraining
/**
* A unique identifier for the new job. The default is an autogenerated UUID.
*/
public val id: kotlin.String? = builder.id
/**
* Maximum total number of training jobs to start for the hyperparameter tuning job. The default is 2. Neptune ML automatically tunes the hyperparameters of the machine learning model. To obtain a model that performs well, use at least 10 jobs (in other words, set `maxHPONumberOfTrainingJobs` to 10). In general, the more tuning runs, the better the results.
*/
public val maxHpoNumberOfTrainingJobs: kotlin.Int? = builder.maxHpoNumberOfTrainingJobs
/**
* Maximum number of parallel training jobs to start for the hyperparameter tuning job. The default is 2. The number of parallel jobs you can run is limited by the available resources on your training instance.
*/
public val maxHpoParallelTrainingJobs: kotlin.Int? = builder.maxHpoParallelTrainingJobs
/**
* The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
*/
public val neptuneIamRoleArn: kotlin.String? = builder.neptuneIamRoleArn
/**
* The job ID of a completed model-training job that you want to update incrementally based on updated data.
*/
public val previousModelTrainingJobId: kotlin.String? = builder.previousModelTrainingJobId
/**
* The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.
*/
public val s3OutputEncryptionKmsKey: kotlin.String? = builder.s3OutputEncryptionKmsKey
/**
* The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.
*/
public val sagemakerIamRoleArn: kotlin.String? = builder.sagemakerIamRoleArn
/**
* The VPC security group IDs. The default is None.
*/
public val securityGroupIds: List<String>? = builder.securityGroupIds
/**
* The IDs of the subnets in the Neptune VPC. The default is None.
*/
public val subnets: List<String>? = builder.subnets
/**
* The location in Amazon S3 where the model artifacts are to be stored.
*/
public val trainModelS3Location: kotlin.String? = builder.trainModelS3Location
/**
* The type of ML instance used for model training. All Neptune ML models support CPU, GPU, and multiGPU training. The default is `ml.p3.2xlarge`. Choosing the right instance type for training depends on the task type, graph size, and your budget.
*/
public val trainingInstanceType: kotlin.String? = builder.trainingInstanceType
/**
* The disk volume size of the training instance. Both input data and the output model are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML selects a disk volume size based on the recommendation generated in the data processing step.
*/
public val trainingInstanceVolumeSizeInGb: kotlin.Int? = builder.trainingInstanceVolumeSizeInGb
/**
* Timeout in seconds for the training job. The default is 86,400 (1 day).
*/
public val trainingTimeOutInSeconds: kotlin.Int? = builder.trainingTimeOutInSeconds
/**
* The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.
*/
public val volumeEncryptionKmsKey: kotlin.String? = builder.volumeEncryptionKmsKey
public companion object {
public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.neptunedata.model.StartMlModelTrainingJobRequest = Builder().apply(block).build()
}
override fun toString(): kotlin.String = buildString {
append("StartMlModelTrainingJobRequest(")
append("baseProcessingInstanceType=$baseProcessingInstanceType,")
append("customModelTrainingParameters=$customModelTrainingParameters,")
append("dataProcessingJobId=$dataProcessingJobId,")
append("enableManagedSpotTraining=$enableManagedSpotTraining,")
append("id=$id,")
append("maxHpoNumberOfTrainingJobs=$maxHpoNumberOfTrainingJobs,")
append("maxHpoParallelTrainingJobs=$maxHpoParallelTrainingJobs,")
append("neptuneIamRoleArn=$neptuneIamRoleArn,")
append("previousModelTrainingJobId=$previousModelTrainingJobId,")
append("s3OutputEncryptionKmsKey=$s3OutputEncryptionKmsKey,")
append("sagemakerIamRoleArn=$sagemakerIamRoleArn,")
append("securityGroupIds=$securityGroupIds,")
append("subnets=$subnets,")
append("trainModelS3Location=$trainModelS3Location,")
append("trainingInstanceType=$trainingInstanceType,")
append("trainingInstanceVolumeSizeInGb=$trainingInstanceVolumeSizeInGb,")
append("trainingTimeOutInSeconds=$trainingTimeOutInSeconds,")
append("volumeEncryptionKmsKey=$volumeEncryptionKmsKey")
append(")")
}
override fun hashCode(): kotlin.Int {
var result = baseProcessingInstanceType?.hashCode() ?: 0
result = 31 * result + (customModelTrainingParameters?.hashCode() ?: 0)
result = 31 * result + (dataProcessingJobId?.hashCode() ?: 0)
result = 31 * result + (enableManagedSpotTraining?.hashCode() ?: 0)
result = 31 * result + (id?.hashCode() ?: 0)
result = 31 * result + (maxHpoNumberOfTrainingJobs ?: 0)
result = 31 * result + (maxHpoParallelTrainingJobs ?: 0)
result = 31 * result + (neptuneIamRoleArn?.hashCode() ?: 0)
result = 31 * result + (previousModelTrainingJobId?.hashCode() ?: 0)
result = 31 * result + (s3OutputEncryptionKmsKey?.hashCode() ?: 0)
result = 31 * result + (sagemakerIamRoleArn?.hashCode() ?: 0)
result = 31 * result + (securityGroupIds?.hashCode() ?: 0)
result = 31 * result + (subnets?.hashCode() ?: 0)
result = 31 * result + (trainModelS3Location?.hashCode() ?: 0)
result = 31 * result + (trainingInstanceType?.hashCode() ?: 0)
result = 31 * result + (trainingInstanceVolumeSizeInGb ?: 0)
result = 31 * result + (trainingTimeOutInSeconds ?: 0)
result = 31 * result + (volumeEncryptionKmsKey?.hashCode() ?: 0)
return result
}
override fun equals(other: kotlin.Any?): kotlin.Boolean {
if (this === other) return true
if (other == null || this::class != other::class) return false
other as StartMlModelTrainingJobRequest
if (baseProcessingInstanceType != other.baseProcessingInstanceType) return false
if (customModelTrainingParameters != other.customModelTrainingParameters) return false
if (dataProcessingJobId != other.dataProcessingJobId) return false
if (enableManagedSpotTraining != other.enableManagedSpotTraining) return false
if (id != other.id) return false
if (maxHpoNumberOfTrainingJobs != other.maxHpoNumberOfTrainingJobs) return false
if (maxHpoParallelTrainingJobs != other.maxHpoParallelTrainingJobs) return false
if (neptuneIamRoleArn != other.neptuneIamRoleArn) return false
if (previousModelTrainingJobId != other.previousModelTrainingJobId) return false
if (s3OutputEncryptionKmsKey != other.s3OutputEncryptionKmsKey) return false
if (sagemakerIamRoleArn != other.sagemakerIamRoleArn) return false
if (securityGroupIds != other.securityGroupIds) return false
if (subnets != other.subnets) return false
if (trainModelS3Location != other.trainModelS3Location) return false
if (trainingInstanceType != other.trainingInstanceType) return false
if (trainingInstanceVolumeSizeInGb != other.trainingInstanceVolumeSizeInGb) return false
if (trainingTimeOutInSeconds != other.trainingTimeOutInSeconds) return false
if (volumeEncryptionKmsKey != other.volumeEncryptionKmsKey) return false
return true
}
public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.neptunedata.model.StartMlModelTrainingJobRequest = Builder(this).apply(block).build()
@SdkDsl
public class Builder {
/**
* The type of ML instance used in preparing and managing training of ML models. This is a CPU instance chosen based on memory requirements for processing the training data and model.
*/
public var baseProcessingInstanceType: kotlin.String? = null
/**
* The configuration for custom model training. This is a JSON object.
*/
public var customModelTrainingParameters: aws.sdk.kotlin.services.neptunedata.model.CustomModelTrainingParameters? = null
/**
* The job ID of the completed data-processing job that has created the data that the training will work with.
*/
public var dataProcessingJobId: kotlin.String? = null
/**
* Optimizes the cost of training machine-learning models by using Amazon Elastic Compute Cloud spot instances. The default is `False`.
*/
public var enableManagedSpotTraining: kotlin.Boolean? = null
/**
* A unique identifier for the new job. The default is an autogenerated UUID.
*/
public var id: kotlin.String? = null
/**
* Maximum total number of training jobs to start for the hyperparameter tuning job. The default is 2. Neptune ML automatically tunes the hyperparameters of the machine learning model. To obtain a model that performs well, use at least 10 jobs (in other words, set `maxHPONumberOfTrainingJobs` to 10). In general, the more tuning runs, the better the results.
*/
public var maxHpoNumberOfTrainingJobs: kotlin.Int? = null
/**
* Maximum number of parallel training jobs to start for the hyperparameter tuning job. The default is 2. The number of parallel jobs you can run is limited by the available resources on your training instance.
*/
public var maxHpoParallelTrainingJobs: kotlin.Int? = null
/**
* The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
*/
public var neptuneIamRoleArn: kotlin.String? = null
/**
* The job ID of a completed model-training job that you want to update incrementally based on updated data.
*/
public var previousModelTrainingJobId: kotlin.String? = null
/**
* The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.
*/
public var s3OutputEncryptionKmsKey: kotlin.String? = null
/**
* The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.
*/
public var sagemakerIamRoleArn: kotlin.String? = null
/**
* The VPC security group IDs. The default is None.
*/
public var securityGroupIds: List<String>? = null
/**
* The IDs of the subnets in the Neptune VPC. The default is None.
*/
public var subnets: List<String>? = null
/**
* The location in Amazon S3 where the model artifacts are to be stored.
*/
public var trainModelS3Location: kotlin.String? = null
/**
* The type of ML instance used for model training. All Neptune ML models support CPU, GPU, and multiGPU training. The default is `ml.p3.2xlarge`. Choosing the right instance type for training depends on the task type, graph size, and your budget.
*/
public var trainingInstanceType: kotlin.String? = null
/**
* The disk volume size of the training instance. Both input data and the output model are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML selects a disk volume size based on the recommendation generated in the data processing step.
*/
public var trainingInstanceVolumeSizeInGb: kotlin.Int? = null
/**
* Timeout in seconds for the training job. The default is 86,400 (1 day).
*/
public var trainingTimeOutInSeconds: kotlin.Int? = null
/**
* The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.
*/
public var volumeEncryptionKmsKey: kotlin.String? = null
@PublishedApi
internal constructor()
@PublishedApi
internal constructor(x: aws.sdk.kotlin.services.neptunedata.model.StartMlModelTrainingJobRequest) : this() {
this.baseProcessingInstanceType = x.baseProcessingInstanceType
this.customModelTrainingParameters = x.customModelTrainingParameters
this.dataProcessingJobId = x.dataProcessingJobId
this.enableManagedSpotTraining = x.enableManagedSpotTraining
this.id = x.id
this.maxHpoNumberOfTrainingJobs = x.maxHpoNumberOfTrainingJobs
this.maxHpoParallelTrainingJobs = x.maxHpoParallelTrainingJobs
this.neptuneIamRoleArn = x.neptuneIamRoleArn
this.previousModelTrainingJobId = x.previousModelTrainingJobId
this.s3OutputEncryptionKmsKey = x.s3OutputEncryptionKmsKey
this.sagemakerIamRoleArn = x.sagemakerIamRoleArn
this.securityGroupIds = x.securityGroupIds
this.subnets = x.subnets
this.trainModelS3Location = x.trainModelS3Location
this.trainingInstanceType = x.trainingInstanceType
this.trainingInstanceVolumeSizeInGb = x.trainingInstanceVolumeSizeInGb
this.trainingTimeOutInSeconds = x.trainingTimeOutInSeconds
this.volumeEncryptionKmsKey = x.volumeEncryptionKmsKey
}
@PublishedApi
internal fun build(): aws.sdk.kotlin.services.neptunedata.model.StartMlModelTrainingJobRequest = StartMlModelTrainingJobRequest(this)
/**
* construct an [aws.sdk.kotlin.services.neptunedata.model.CustomModelTrainingParameters] inside the given [block]
*/
public fun customModelTrainingParameters(block: aws.sdk.kotlin.services.neptunedata.model.CustomModelTrainingParameters.Builder.() -> kotlin.Unit) {
this.customModelTrainingParameters = aws.sdk.kotlin.services.neptunedata.model.CustomModelTrainingParameters.invoke(block)
}
internal fun correctErrors(): Builder {
return this
}
}
}
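
// Usage sketch (added for illustration; not produced by the code generator). It shows the
// builder DSL exposed through the companion invoke() operator and the copy() function.
// All literal values are placeholders, and the resulting request would normally be passed
// to the Neptune data client's startMlModelTrainingJob operation.
private fun startMlModelTrainingJobRequestSample(): StartMlModelTrainingJobRequest {
    // Every field is optional; unset fields use the service-side defaults documented above.
    val request = StartMlModelTrainingJobRequest {
        dataProcessingJobId = "example-data-processing-job"      // placeholder job ID
        trainModelS3Location = "s3://example-bucket/neptune-ml/model-artifacts/"
        trainingInstanceType = "ml.p3.2xlarge"                   // the documented default
        maxHpoNumberOfTrainingJobs = 10                          // at least 10 is recommended
        maxHpoParallelTrainingJobs = 2                           // the documented default
    }

    // copy() rebuilds the request with selected fields overridden, leaving the rest intact.
    return request.copy {
        enableManagedSpotTraining = true
    }
}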