// com.pulumi.gcp.vertex.kotlin.outputs.AiDeploymentResourcePoolDedicatedResources.kt (pulumi-gcp-kotlin)
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.vertex.kotlin.outputs
import kotlin.Int
import kotlin.Suppress
import kotlin.collections.List
/**
 *
 * @property autoscalingMetricSpecs A list of the metric specifications that override a resource utilization metric.
 * Structure is documented below.
 * @property machineSpec The specification of a single machine used by the prediction.
 * Structure is documented below.
 * @property maxReplicaCount The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, min_replica_count will be used as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
 * @property minReplicaCount The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
 */
public data class AiDeploymentResourcePoolDedicatedResources(
    public val autoscalingMetricSpecs: List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec>? = null,
    public val machineSpec: AiDeploymentResourcePoolDedicatedResourcesMachineSpec,
    public val maxReplicaCount: Int? = null,
    public val minReplicaCount: Int,
) {
    public companion object {
        /**
         * Converts the generated Java output type into its Kotlin counterpart.
         */
        public fun toKotlin(javaType: com.pulumi.gcp.vertex.outputs.AiDeploymentResourcePoolDedicatedResources): AiDeploymentResourcePoolDedicatedResources = AiDeploymentResourcePoolDedicatedResources(
            autoscalingMetricSpecs = javaType.autoscalingMetricSpecs().map({ args0 ->
                args0.let({ args0 ->
                    com.pulumi.gcp.vertex.kotlin.outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.Companion.toKotlin(args0)
                })
            }),
            machineSpec = javaType.machineSpec().let({ args0 ->
                com.pulumi.gcp.vertex.kotlin.outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec.Companion.toKotlin(args0)
            }),
            maxReplicaCount = javaType.maxReplicaCount().map({ args0 -> args0 }).orElse(null),
            minReplicaCount = javaType.minReplicaCount(),
        )
    }
}
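
// Usage sketch (illustrative; not part of the generated SDK source). A minimal example of
// inspecting a value of this output type, for instance after converting a Java output via
// `toKotlin` above. The helper name `describeReplicaRange` is hypothetical and only uses
// properties declared in this file.
private fun describeReplicaRange(resources: AiDeploymentResourcePoolDedicatedResources): String {
    // Per the KDoc above, the service falls back to minReplicaCount when maxReplicaCount is unset.
    val maxReplicas = resources.maxReplicaCount ?: resources.minReplicaCount
    val metricOverrides = resources.autoscalingMetricSpecs?.size ?: 0
    return "replicas ${resources.minReplicaCount}..$maxReplicas with $metricOverrides autoscaling metric override(s)"
}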