// commonMain/aws/sdk/kotlin/services/cleanroomsml/model/StartTrainedModelInferenceJobRequest.kt
// Artifact: cleanroomsml-jvm — The AWS SDK for Kotlin client for CleanRoomsML
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.cleanroomsml.model
import aws.smithy.kotlin.runtime.SdkDsl
public class StartTrainedModelInferenceJobRequest private constructor(builder: Builder) {
    /**
     * The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job.
     */
    public val configuredModelAlgorithmAssociationArn: kotlin.String? = builder.configuredModelAlgorithmAssociationArn
    /**
     * The execution parameters for the container.
     */
    public val containerExecutionParameters: aws.sdk.kotlin.services.cleanroomsml.model.InferenceContainerExecutionParameters? = builder.containerExecutionParameters
    /**
     * Defines the data source that is used for the trained model inference job.
     */
    public val dataSource: aws.sdk.kotlin.services.cleanroomsml.model.ModelInferenceDataSource? = builder.dataSource
    /**
     * The description of the trained model inference job.
     */
    public val description: kotlin.String? = builder.description
    /**
     * The environment variables to set in the Docker container.
     */
    public val environment: Map<String, String>? = builder.environment
    /**
     * The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data.
     */
    public val kmsKeyArn: kotlin.String? = builder.kmsKeyArn
    /**
     * The membership ID of the membership that contains the trained model inference job.
     */
    public val membershipIdentifier: kotlin.String? = builder.membershipIdentifier
    /**
     * The name of the trained model inference job.
     */
    public val name: kotlin.String? = builder.name
    /**
     * Defines the output configuration information for the trained model inference job.
     */
    public val outputConfiguration: aws.sdk.kotlin.services.cleanroomsml.model.InferenceOutputConfiguration? = builder.outputConfiguration
    /**
     * Defines the resource configuration for the trained model inference job.
     */
    public val resourceConfig: aws.sdk.kotlin.services.cleanroomsml.model.InferenceResourceConfig? = builder.resourceConfig
    /**
     * The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.
     *
     * The following basic restrictions apply to tags:
     * + Maximum number of tags per resource - 50.
     * + For each resource, each tag key must be unique, and each tag key can have only one value.
     * + Maximum key length - 128 Unicode characters in UTF-8.
     * + Maximum value length - 256 Unicode characters in UTF-8.
     * + If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
     * + Tag keys and values are case sensitive.
     * + Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.
     */
    public val tags: Map<String, String>? = builder.tags
    /**
     * The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job.
     */
    public val trainedModelArn: kotlin.String? = builder.trainedModelArn

    public companion object {
        /**
         * Builds a [StartTrainedModelInferenceJobRequest] by applying [block] to a fresh [Builder].
         */
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.cleanroomsml.model.StartTrainedModelInferenceJobRequest = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("StartTrainedModelInferenceJobRequest(")
        append("configuredModelAlgorithmAssociationArn=$configuredModelAlgorithmAssociationArn,")
        append("containerExecutionParameters=$containerExecutionParameters,")
        append("dataSource=$dataSource,")
        append("description=$description,")
        append("environment=$environment,")
        append("kmsKeyArn=$kmsKeyArn,")
        append("membershipIdentifier=$membershipIdentifier,")
        append("name=$name,")
        append("outputConfiguration=$outputConfiguration,")
        append("resourceConfig=$resourceConfig,")
        append("tags=$tags,")
        append("trainedModelArn=$trainedModelArn")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = configuredModelAlgorithmAssociationArn?.hashCode() ?: 0
        result = 31 * result + (containerExecutionParameters?.hashCode() ?: 0)
        result = 31 * result + (dataSource?.hashCode() ?: 0)
        result = 31 * result + (description?.hashCode() ?: 0)
        result = 31 * result + (environment?.hashCode() ?: 0)
        result = 31 * result + (kmsKeyArn?.hashCode() ?: 0)
        result = 31 * result + (membershipIdentifier?.hashCode() ?: 0)
        result = 31 * result + (name?.hashCode() ?: 0)
        result = 31 * result + (outputConfiguration?.hashCode() ?: 0)
        result = 31 * result + (resourceConfig?.hashCode() ?: 0)
        result = 31 * result + (tags?.hashCode() ?: 0)
        result = 31 * result + (trainedModelArn?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as StartTrainedModelInferenceJobRequest

        if (configuredModelAlgorithmAssociationArn != other.configuredModelAlgorithmAssociationArn) return false
        if (containerExecutionParameters != other.containerExecutionParameters) return false
        if (dataSource != other.dataSource) return false
        if (description != other.description) return false
        if (environment != other.environment) return false
        if (kmsKeyArn != other.kmsKeyArn) return false
        if (membershipIdentifier != other.membershipIdentifier) return false
        if (name != other.name) return false
        if (outputConfiguration != other.outputConfiguration) return false
        if (resourceConfig != other.resourceConfig) return false
        if (tags != other.tags) return false
        if (trainedModelArn != other.trainedModelArn) return false

        return true
    }

    /**
     * Returns a copy of this request, optionally mutated via [block] on a [Builder]
     * pre-populated with this instance's values.
     */
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.cleanroomsml.model.StartTrainedModelInferenceJobRequest = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job.
         */
        public var configuredModelAlgorithmAssociationArn: kotlin.String? = null
        /**
         * The execution parameters for the container.
         */
        public var containerExecutionParameters: aws.sdk.kotlin.services.cleanroomsml.model.InferenceContainerExecutionParameters? = null
        /**
         * Defines the data source that is used for the trained model inference job.
         */
        public var dataSource: aws.sdk.kotlin.services.cleanroomsml.model.ModelInferenceDataSource? = null
        /**
         * The description of the trained model inference job.
         */
        public var description: kotlin.String? = null
        /**
         * The environment variables to set in the Docker container.
         */
        public var environment: Map<String, String>? = null
        /**
         * The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data.
         */
        public var kmsKeyArn: kotlin.String? = null
        /**
         * The membership ID of the membership that contains the trained model inference job.
         */
        public var membershipIdentifier: kotlin.String? = null
        /**
         * The name of the trained model inference job.
         */
        public var name: kotlin.String? = null
        /**
         * Defines the output configuration information for the trained model inference job.
         */
        public var outputConfiguration: aws.sdk.kotlin.services.cleanroomsml.model.InferenceOutputConfiguration? = null
        /**
         * Defines the resource configuration for the trained model inference job.
         */
        public var resourceConfig: aws.sdk.kotlin.services.cleanroomsml.model.InferenceResourceConfig? = null
        /**
         * The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.
         *
         * The following basic restrictions apply to tags:
         * + Maximum number of tags per resource - 50.
         * + For each resource, each tag key must be unique, and each tag key can have only one value.
         * + Maximum key length - 128 Unicode characters in UTF-8.
         * + Maximum value length - 256 Unicode characters in UTF-8.
         * + If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
         * + Tag keys and values are case sensitive.
         * + Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.
         */
        public var tags: Map<String, String>? = null
        /**
         * The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job.
         */
        public var trainedModelArn: kotlin.String? = null

        @PublishedApi
        internal constructor()

        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.cleanroomsml.model.StartTrainedModelInferenceJobRequest) : this() {
            this.configuredModelAlgorithmAssociationArn = x.configuredModelAlgorithmAssociationArn
            this.containerExecutionParameters = x.containerExecutionParameters
            this.dataSource = x.dataSource
            this.description = x.description
            this.environment = x.environment
            this.kmsKeyArn = x.kmsKeyArn
            this.membershipIdentifier = x.membershipIdentifier
            this.name = x.name
            this.outputConfiguration = x.outputConfiguration
            this.resourceConfig = x.resourceConfig
            this.tags = x.tags
            this.trainedModelArn = x.trainedModelArn
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.cleanroomsml.model.StartTrainedModelInferenceJobRequest = StartTrainedModelInferenceJobRequest(this)

        /**
         * construct an [aws.sdk.kotlin.services.cleanroomsml.model.InferenceContainerExecutionParameters] inside the given [block]
         */
        public fun containerExecutionParameters(block: aws.sdk.kotlin.services.cleanroomsml.model.InferenceContainerExecutionParameters.Builder.() -> kotlin.Unit) {
            this.containerExecutionParameters = aws.sdk.kotlin.services.cleanroomsml.model.InferenceContainerExecutionParameters.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.cleanroomsml.model.ModelInferenceDataSource] inside the given [block]
         */
        public fun dataSource(block: aws.sdk.kotlin.services.cleanroomsml.model.ModelInferenceDataSource.Builder.() -> kotlin.Unit) {
            this.dataSource = aws.sdk.kotlin.services.cleanroomsml.model.ModelInferenceDataSource.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.cleanroomsml.model.InferenceOutputConfiguration] inside the given [block]
         */
        public fun outputConfiguration(block: aws.sdk.kotlin.services.cleanroomsml.model.InferenceOutputConfiguration.Builder.() -> kotlin.Unit) {
            this.outputConfiguration = aws.sdk.kotlin.services.cleanroomsml.model.InferenceOutputConfiguration.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.cleanroomsml.model.InferenceResourceConfig] inside the given [block]
         */
        public fun resourceConfig(block: aws.sdk.kotlin.services.cleanroomsml.model.InferenceResourceConfig.Builder.() -> kotlin.Unit) {
            this.resourceConfig = aws.sdk.kotlin.services.cleanroomsml.model.InferenceResourceConfig.invoke(block)
        }

        // Hook used by the generated deserializer to normalize the builder before build(); no-op here.
        internal fun correctErrors(): Builder {
            return this
        }
    }
}