All downloads are free. Search and download functionality uses the official Maven repository.

com.pulumi.gcp.vertex.kotlin.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs.kt Maven / Gradle / Ivy

Go to download

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.

There is a newer version: 8.12.0.0
Show newest version
@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.vertex.kotlin.inputs

import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs.builder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiNullFieldException
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Int
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.jvm.JvmName

/**
 * The dedicated-resources configuration for a Vertex AI deployment resource pool.
 *
 * @property autoscalingMetricSpecs A list of the metric specifications that overrides a resource utilization metric.
 * Structure is documented below.
 * @property machineSpec The specification of a single machine used by the prediction
 * Structure is documented below.
 * @property maxReplicaCount The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
 * @property minReplicaCount The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
 */
public data class AiDeploymentResourcePoolDedicatedResourcesArgs(
    public val autoscalingMetricSpecs: Output<List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs>>? = null,
    public val machineSpec: Output<AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs>,
    public val maxReplicaCount: Output<Int>? = null,
    public val minReplicaCount: Output<Int>,
) : ConvertibleToJava<com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs> {
    /** Converts this Kotlin-typed args object to its Java SDK equivalent. */
    override fun toJava(): com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs = com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
        .autoscalingMetricSpecs(
            autoscalingMetricSpecs?.applyValue({ args0 ->
                args0.map({ args0 ->
                    args0.let({ args0 -> args0.toJava() })
                })
            }),
        )
        .machineSpec(machineSpec.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
        .maxReplicaCount(maxReplicaCount?.applyValue({ args0 -> args0 }))
        .minReplicaCount(minReplicaCount.applyValue({ args0 -> args0 })).build()
}

/**
 * Builder for [AiDeploymentResourcePoolDedicatedResourcesArgs].
 */
@PulumiTagMarker
public class AiDeploymentResourcePoolDedicatedResourcesArgsBuilder internal constructor() {
    private var autoscalingMetricSpecs:
        Output<List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs>>? = null

    private var machineSpec: Output<AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs>? = null

    private var maxReplicaCount: Output<Int>? = null

    private var minReplicaCount: Output<Int>? = null

    /**
     * @param value A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("amceimlulqsuvgeh")
    public suspend fun autoscalingMetricSpecs(`value`: Output<List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs>>) {
        this.autoscalingMetricSpecs = value
    }

    /**
     * @param values A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("rsnaaplfcjyidmit")
    public suspend fun autoscalingMetricSpecs(vararg values: Output<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs>) {
        this.autoscalingMetricSpecs = Output.all(values.asList())
    }

    /**
     * @param values A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("bxmfccnjjojfpxav")
    public suspend fun autoscalingMetricSpecs(values: List<Output<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs>>) {
        this.autoscalingMetricSpecs = Output.all(values)
    }

    /**
     * @param value The specification of a single machine used by the prediction
     * Structure is documented below.
     */
    @JvmName("kquqrflswhkaryes")
    public suspend fun machineSpec(`value`: Output<AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs>) {
        this.machineSpec = value
    }

    /**
     * @param value The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
     */
    @JvmName("wcvqgadpccronwix")
    public suspend fun maxReplicaCount(`value`: Output<Int>) {
        this.maxReplicaCount = value
    }

    /**
     * @param value The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
     */
    @JvmName("cbvryhbkbllixtnd")
    public suspend fun minReplicaCount(`value`: Output<Int>) {
        this.minReplicaCount = value
    }

    /**
     * @param value A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("ywsjevmrkhgedoxw")
    public suspend fun autoscalingMetricSpecs(`value`: List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.autoscalingMetricSpecs = mapped
    }

    /**
     * @param argument A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("uhtsjaesnpqthsjp")
    public suspend fun autoscalingMetricSpecs(argument: List<suspend AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgsBuilder.() -> Unit>) {
        val toBeMapped = argument.toList().map {
            AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.autoscalingMetricSpecs = mapped
    }

    /**
     * @param argument A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("buhdhrpnyimcylav")
    public suspend fun autoscalingMetricSpecs(vararg argument: suspend AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgsBuilder.() -> Unit) {
        val toBeMapped = argument.toList().map {
            AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.autoscalingMetricSpecs = mapped
    }

    /**
     * @param argument A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("friidnyjsvxalbgx")
    public suspend fun autoscalingMetricSpecs(argument: suspend AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgsBuilder.() -> Unit) {
        val toBeMapped =
            listOf(
                AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgsBuilder().applySuspend
                    { argument() }.build(),
            )
        val mapped = of(toBeMapped)
        this.autoscalingMetricSpecs = mapped
    }

    /**
     * @param values A list of the metric specifications that overrides a resource utilization metric.
     * Structure is documented below.
     */
    @JvmName("eofkoqmndauxarxq")
    public suspend fun autoscalingMetricSpecs(vararg values: AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs) {
        val toBeMapped = values.toList()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.autoscalingMetricSpecs = mapped
    }

    /**
     * @param value The specification of a single machine used by the prediction
     * Structure is documented below.
     */
    @JvmName("dxajcdmxkwslnchg")
    public suspend fun machineSpec(`value`: AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs) {
        val toBeMapped = value
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.machineSpec = mapped
    }

    /**
     * @param argument The specification of a single machine used by the prediction
     * Structure is documented below.
     */
    @JvmName("lwffwiodrebhfrux")
    public suspend fun machineSpec(argument: suspend AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgsBuilder.() -> Unit) {
        val toBeMapped =
            AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.machineSpec = mapped
    }

    /**
     * @param value The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
     */
    @JvmName("vefiaxatjremxdrw")
    public suspend fun maxReplicaCount(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.maxReplicaCount = mapped
    }

    /**
     * @param value The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
     */
    @JvmName("uidabensjksqjioa")
    public suspend fun minReplicaCount(`value`: Int) {
        val toBeMapped = value
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.minReplicaCount = mapped
    }

    /**
     * Finalizes the builder; throws [PulumiNullFieldException] when a
     * required field (machineSpec, minReplicaCount) was never set.
     */
    internal fun build(): AiDeploymentResourcePoolDedicatedResourcesArgs =
        AiDeploymentResourcePoolDedicatedResourcesArgs(
            autoscalingMetricSpecs = autoscalingMetricSpecs,
            machineSpec = machineSpec ?: throw PulumiNullFieldException("machineSpec"),
            maxReplicaCount = maxReplicaCount,
            minReplicaCount = minReplicaCount ?: throw PulumiNullFieldException("minReplicaCount"),
        )
}




© 2015 - 2025 Weber Informatics LLC | Privacy Policy