@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.vertex.kotlin
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs.builder
import com.pulumi.gcp.vertex.kotlin.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
import com.pulumi.gcp.vertex.kotlin.inputs.AiDeploymentResourcePoolDedicatedResourcesArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.jvm.JvmName
/**
* 'DeploymentResourcePool can be shared by multiple deployed models,
* whose underlying specification consists of dedicated resources.'
* To get more information about DeploymentResourcePool, see:
* * [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.deploymentResourcePools)
* ## Example Usage
* ### Vertex Ai Deployment Resource Pool
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const deploymentResourcePool = new gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool", {
* region: "us-central1",
* name: "example-deployment-resource-pool",
* dedicatedResources: {
* machineSpec: {
* machineType: "n1-standard-4",
* acceleratorType: "NVIDIA_TESLA_K80",
* acceleratorCount: 1,
* },
* minReplicaCount: 1,
* maxReplicaCount: 2,
* autoscalingMetricSpecs: [{
* metricName: "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
* target: 60,
* }],
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
* region="us-central1",
* name="example-deployment-resource-pool",
* dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
* machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
* machine_type="n1-standard-4",
* accelerator_type="NVIDIA_TESLA_K80",
* accelerator_count=1,
* ),
* min_replica_count=1,
* max_replica_count=2,
* autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
* metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
* target=60,
* )],
* ))
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
*     var deploymentResourcePool = new Gcp.Vertex.AiDeploymentResourcePool("deployment_resource_pool", new()
*     {
*         Region = "us-central1",
*         Name = "example-deployment-resource-pool",
*         DedicatedResources = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
*         {
*             MachineSpec = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
*             {
*                 MachineType = "n1-standard-4",
*                 AcceleratorType = "NVIDIA_TESLA_K80",
*                 AcceleratorCount = 1,
*             },
*             MinReplicaCount = 1,
*             MaxReplicaCount = 2,
*             AutoscalingMetricSpecs = new[]
*             {
*                 new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
*                 {
*                     MetricName = "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
*                     Target = 60,
*                 },
*             },
*         },
*     });
* });
* ```
* ```go
* package main
* import (
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/vertex"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
*     pulumi.Run(func(ctx *pulumi.Context) error {
*         _, err := vertex.NewAiDeploymentResourcePool(ctx, "deployment_resource_pool", &vertex.AiDeploymentResourcePoolArgs{
*             Region: pulumi.String("us-central1"),
*             Name: pulumi.String("example-deployment-resource-pool"),
*             DedicatedResources: &vertex.AiDeploymentResourcePoolDedicatedResourcesArgs{
*                 MachineSpec: &vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs{
*                     MachineType: pulumi.String("n1-standard-4"),
*                     AcceleratorType: pulumi.String("NVIDIA_TESLA_K80"),
*                     AcceleratorCount: pulumi.Int(1),
*                 },
*                 MinReplicaCount: pulumi.Int(1),
*                 MaxReplicaCount: pulumi.Int(2),
*                 AutoscalingMetricSpecs: vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArray{
*                     &vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs{
*                         MetricName: pulumi.String("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"),
*                         Target: pulumi.Int(60),
*                     },
*                 },
*             },
*         })
*         if err != nil {
*             return err
*         }
*         return nil
*     })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.vertex.AiDeploymentResourcePool;
* import com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs;
* import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs;
* import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs;
* import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
*     public static void main(String[] args) {
*         Pulumi.run(App::stack);
*     }
*     public static void stack(Context ctx) {
*         var deploymentResourcePool = new AiDeploymentResourcePool("deploymentResourcePool", AiDeploymentResourcePoolArgs.builder()
*             .region("us-central1")
*             .name("example-deployment-resource-pool")
*             .dedicatedResources(AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
*                 .machineSpec(AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs.builder()
*                     .machineType("n1-standard-4")
*                     .acceleratorType("NVIDIA_TESLA_K80")
*                     .acceleratorCount(1)
*                     .build())
*                 .minReplicaCount(1)
*                 .maxReplicaCount(2)
*                 .autoscalingMetricSpecs(AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs.builder()
*                     .metricName("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle")
*                     .target(60)
*                     .build())
*                 .build())
*             .build());
*     }
* }
* ```
* ```yaml
* resources:
*   deploymentResourcePool:
*     type: gcp:vertex:AiDeploymentResourcePool
*     name: deployment_resource_pool
*     properties:
*       region: us-central1
*       name: example-deployment-resource-pool
*       dedicatedResources:
*         machineSpec:
*           machineType: n1-standard-4
*           acceleratorType: NVIDIA_TESLA_K80
*           acceleratorCount: 1
*         minReplicaCount: 1
*         maxReplicaCount: 2
*         autoscalingMetricSpecs:
*           - metricName: aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*             target: 60
* ```
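*
* A Kotlin program using this SDK would look roughly like the sketch below. It is illustrative rather than generated: the `region`, `name`, and `dedicatedResources { }` calls correspond to the builder functions declared in this file, while the `Pulumi.run` entry point, the top-level `aiDeploymentResourcePool` resource function, the `args { }` block, and the nested `machineSpec` builder names are assumed to follow the Kotlin SDK's usual generated conventions and should be checked against the actual API.
* ```kotlin
* import com.pulumi.kotlin.Pulumi
*
* fun main() {
*     Pulumi.run {
*         // Assumed generated resource function for com.pulumi.gcp.vertex.kotlin;
*         // its args { } block is backed by AiDeploymentResourcePoolArgsBuilder below.
*         aiDeploymentResourcePool("deployment_resource_pool") {
*             args {
*                 region("us-central1")
*                 name("example-deployment-resource-pool")
*                 dedicatedResources {
*                     // Nested builder names assumed to mirror the schema shown above.
*                     machineSpec {
*                         machineType("n1-standard-4")
*                         acceleratorType("NVIDIA_TESLA_K80")
*                         acceleratorCount(1)
*                     }
*                     minReplicaCount(1)
*                     maxReplicaCount(2)
*                 }
*             }
*         }
*     }
* }
* ```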
*
* ## Import
* DeploymentResourcePool can be imported using any of these accepted formats:
* * `projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}`
* * `{{project}}/{{region}}/{{name}}`
* * `{{region}}/{{name}}`
* * `{{name}}`
* When using the `pulumi import` command, DeploymentResourcePool can be imported using one of the formats above. For example:
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
* ```
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
* ```
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
* ```
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
* ```
* @property dedicatedResources The underlying dedicated resources that the deployment resource pool uses.
* Structure is documented below.
* @property name The resource name of the deployment resource pool. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`.
* - - -
* @property project The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
* @property region The region of the deployment resource pool, e.g. us-central1.
*/
public data class AiDeploymentResourcePoolArgs(
    public val dedicatedResources: Output<AiDeploymentResourcePoolDedicatedResourcesArgs>? = null,
    public val name: Output<String>? = null,
    public val project: Output<String>? = null,
    public val region: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs> {
    override fun toJava(): com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs =
        com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs.builder()
            .dedicatedResources(
                dedicatedResources?.applyValue({ args0 ->
                    args0.let({ args0 ->
                        args0.toJava()
                    })
                }),
            )
            .name(name?.applyValue({ args0 -> args0 }))
            .project(project?.applyValue({ args0 -> args0 }))
            .region(region?.applyValue({ args0 -> args0 })).build()
}
/**
* Builder for [AiDeploymentResourcePoolArgs].
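*
* The suspend functions below are normally invoked for you by the resource DSL. Outside the DSL,
* [AiDeploymentResourcePoolArgs] can also be constructed directly through its public data-class
* constructor; a minimal sketch using only declarations from this file and `Output.of`:
* ```kotlin
* import com.pulumi.core.Output
*
* val args = AiDeploymentResourcePoolArgs(
*     name = Output.of("example-deployment-resource-pool"),
*     region = Output.of("us-central1"),
* )
* ```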
*/
@PulumiTagMarker
public class AiDeploymentResourcePoolArgsBuilder internal constructor() {
    private var dedicatedResources: Output<AiDeploymentResourcePoolDedicatedResourcesArgs>? = null

    private var name: Output<String>? = null

    private var project: Output<String>? = null

    private var region: Output<String>? = null

    /**
     * @param value The underlying dedicated resources that the deployment resource pool uses.
     * Structure is documented below.
     */
    @JvmName("wdiyddqaidjatxnd")
    public suspend fun dedicatedResources(`value`: Output<AiDeploymentResourcePoolDedicatedResourcesArgs>) {
        this.dedicatedResources = value
    }
    /**
     * @param value The resource name of the deployment resource pool. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`.
     * - - -
     */
    @JvmName("lakybgsbfxpjapdb")
    public suspend fun name(`value`: Output<String>) {
        this.name = value
    }
    /**
     * @param value The ID of the project in which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    @JvmName("oevxkotfkjjouyku")
    public suspend fun project(`value`: Output<String>) {
        this.project = value
    }

    /**
     * @param value The region of the deployment resource pool, e.g. us-central1.
     */
    @JvmName("hxidtyfortchhjrt")
    public suspend fun region(`value`: Output<String>) {
        this.region = value
    }
    /**
     * @param value The underlying dedicated resources that the deployment resource pool uses.
     * Structure is documented below.
     */
    @JvmName("lpeusmvbxiiyxlps")
    public suspend fun dedicatedResources(`value`: AiDeploymentResourcePoolDedicatedResourcesArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.dedicatedResources = mapped
    }

    /**
     * @param argument The underlying dedicated resources that the deployment resource pool uses.
     * Structure is documented below.
     */
    @JvmName("kmbowlecnbubwabr")
    public suspend fun dedicatedResources(argument: suspend AiDeploymentResourcePoolDedicatedResourcesArgsBuilder.() -> Unit) {
        val toBeMapped = AiDeploymentResourcePoolDedicatedResourcesArgsBuilder().applySuspend {
            argument()
        }.build()
        val mapped = of(toBeMapped)
        this.dedicatedResources = mapped
    }
    /**
     * @param value The resource name of the deployment resource pool. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`.
     * - - -
     */
    @JvmName("nhyxhllkulfdexkl")
    public suspend fun name(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.name = mapped
    }
    /**
     * @param value The ID of the project in which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    @JvmName("jshonhjkpasvxykj")
    public suspend fun project(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.project = mapped
    }
    /**
     * @param value The region of the deployment resource pool, e.g. us-central1.
     */
    @JvmName("btsjvyxnosgnjtem")
    public suspend fun region(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.region = mapped
    }
    internal fun build(): AiDeploymentResourcePoolArgs = AiDeploymentResourcePoolArgs(
        dedicatedResources = dedicatedResources,
        name = name,
        project = project,
        region = region,
    )
}