@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.vertex.kotlin
import com.pulumi.core.Output
import com.pulumi.gcp.vertex.kotlin.outputs.AiDeploymentResourcePoolDedicatedResources
import com.pulumi.gcp.vertex.kotlin.outputs.AiDeploymentResourcePoolDedicatedResources.Companion.toKotlin
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
/**
* Builder for [AiDeploymentResourcePool].
*/
@PulumiTagMarker
public class AiDeploymentResourcePoolResourceBuilder internal constructor() {
public var name: String? = null
public var args: AiDeploymentResourcePoolArgs = AiDeploymentResourcePoolArgs()
public var opts: CustomResourceOptions = CustomResourceOptions()
/**
* @param name The _unique_ name of the resulting resource.
*/
public fun name(`value`: String) {
this.name = value
}
/**
* @param block The arguments to use to populate this resource's properties.
*/
public suspend fun args(block: suspend AiDeploymentResourcePoolArgsBuilder.() -> Unit) {
val builder = AiDeploymentResourcePoolArgsBuilder()
block(builder)
this.args = builder.build()
}
/**
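* Configures the [CustomResourceOptions] for this resource. A minimal sketch, assuming the options builder exposes the standard `protect` flag:
* ```kotlin
* opts {
*     protect(true)  // assumed setter mirroring the core resource option
* }
* ```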
* @param block A bag of options that control this resource's behavior.
*/
public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
this.opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
}
internal fun build(): AiDeploymentResourcePool {
val builtJavaResource = com.pulumi.gcp.vertex.AiDeploymentResourcePool(
this.name,
this.args.toJava(),
this.opts.toJava(),
)
return AiDeploymentResourcePool(builtJavaResource)
}
}
/**
* A DeploymentResourcePool can be shared by multiple deployed models,
* whose underlying specification consists of dedicated resources.
* To get more information about DeploymentResourcePool, see:
* * [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.deploymentResourcePools)
* ## Example Usage
* ### Vertex Ai Deployment Resource Pool
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const deploymentResourcePool = new gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool", {
* region: "us-central1",
* name: "example-deployment-resource-pool",
* dedicatedResources: {
* machineSpec: {
* machineType: "n1-standard-4",
* acceleratorType: "NVIDIA_TESLA_K80",
* acceleratorCount: 1,
* },
* minReplicaCount: 1,
* maxReplicaCount: 2,
* autoscalingMetricSpecs: [{
* metricName: "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
* target: 60,
* }],
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
* region="us-central1",
* name="example-deployment-resource-pool",
* dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
* machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
* machine_type="n1-standard-4",
* accelerator_type="NVIDIA_TESLA_K80",
* accelerator_count=1,
* ),
* min_replica_count=1,
* max_replica_count=2,
* autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
* metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
* target=60,
* )],
* ))
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
* var deploymentResourcePool = new Gcp.Vertex.AiDeploymentResourcePool("deployment_resource_pool", new()
* {
* Region = "us-central1",
* Name = "example-deployment-resource-pool",
* DedicatedResources = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
* {
* MachineSpec = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
* {
* MachineType = "n1-standard-4",
* AcceleratorType = "NVIDIA_TESLA_K80",
* AcceleratorCount = 1,
* },
* MinReplicaCount = 1,
* MaxReplicaCount = 2,
* AutoscalingMetricSpecs = new[]
* {
* new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
* {
* MetricName = "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
* Target = 60,
* },
* },
* },
* });
* });
* ```
* ```go
* package main
* import (
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/vertex"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* _, err := vertex.NewAiDeploymentResourcePool(ctx, "deployment_resource_pool", &vertex.AiDeploymentResourcePoolArgs{
* Region: pulumi.String("us-central1"),
* Name: pulumi.String("example-deployment-resource-pool"),
* DedicatedResources: &vertex.AiDeploymentResourcePoolDedicatedResourcesArgs{
* MachineSpec: &vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs{
* MachineType: pulumi.String("n1-standard-4"),
* AcceleratorType: pulumi.String("NVIDIA_TESLA_K80"),
* AcceleratorCount: pulumi.Int(1),
* },
* MinReplicaCount: pulumi.Int(1),
* MaxReplicaCount: pulumi.Int(2),
* AutoscalingMetricSpecs: vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArray{
* &vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs{
* MetricName: pulumi.String("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"),
* Target: pulumi.Int(60),
* },
* },
* },
* })
* if err != nil {
* return err
* }
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.vertex.AiDeploymentResourcePool;
* import com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs;
* import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs;
* import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs;
* import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* var deploymentResourcePool = new AiDeploymentResourcePool("deploymentResourcePool", AiDeploymentResourcePoolArgs.builder()
* .region("us-central1")
* .name("example-deployment-resource-pool")
* .dedicatedResources(AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
* .machineSpec(AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs.builder()
* .machineType("n1-standard-4")
* .acceleratorType("NVIDIA_TESLA_K80")
* .acceleratorCount(1)
* .build())
* .minReplicaCount(1)
* .maxReplicaCount(2)
* .autoscalingMetricSpecs(AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs.builder()
* .metricName("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle")
* .target(60)
* .build())
* .build())
* .build());
* }
* }
* ```
* ```yaml
* resources:
* deploymentResourcePool:
* type: gcp:vertex:AiDeploymentResourcePool
* name: deployment_resource_pool
* properties:
* region: us-central1
* name: example-deployment-resource-pool
* dedicatedResources:
* machineSpec:
* machineType: n1-standard-4
* acceleratorType: NVIDIA_TESLA_K80
* acceleratorCount: 1
* minReplicaCount: 1
* maxReplicaCount: 2
* autoscalingMetricSpecs:
* - metricName: aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
* target: 60
* ```
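* An equivalent configuration with this Kotlin SDK's `aiDeploymentResourcePool` builder might look like the sketch below; the nested `dedicatedResources`, `machineSpec`, and `autoscalingMetricSpecs` blocks assume the generated type-safe args builders, and the wrapping `deploymentResourcePool()` function is hypothetical.
* ```kotlin
* import com.pulumi.gcp.vertex.kotlin.aiDeploymentResourcePool
* // Hypothetical helper; call from inside a running Pulumi program's coroutine context.
* suspend fun deploymentResourcePool() {
*     aiDeploymentResourcePool("deployment_resource_pool") {
*         args {
*             region("us-central1")
*             name("example-deployment-resource-pool")
*             dedicatedResources {
*                 machineSpec {
*                     machineType("n1-standard-4")
*                     acceleratorType("NVIDIA_TESLA_K80")
*                     acceleratorCount(1)
*                 }
*                 minReplicaCount(1)
*                 maxReplicaCount(2)
*                 autoscalingMetricSpecs {
*                     metricName("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle")
*                     target(60)
*                 }
*             }
*         }
*     }
* }
* ```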
*
* ## Import
* DeploymentResourcePool can be imported using any of these accepted formats:
* * `projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}`
* * `{{project}}/{{region}}/{{name}}`
* * `{{region}}/{{name}}`
* * `{{name}}`
* When using the `pulumi import` command, DeploymentResourcePool can be imported using one of the formats above. For example:
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
* ```
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
* ```
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
* ```
* ```sh
* $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
* ```
*/
public class AiDeploymentResourcePool internal constructor(
override val javaResource: com.pulumi.gcp.vertex.AiDeploymentResourcePool,
) : KotlinCustomResource(javaResource, AiDeploymentResourcePoolMapper) {
/**
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
*/
public val createTime: Output<String>
get() = javaResource.createTime().applyValue({ args0 -> args0 })
/**
* The underlying dedicated resources that the deployment resource pool uses.
* Structure is documented below.
*/
public val dedicatedResources: Output<AiDeploymentResourcePoolDedicatedResources>?
get() = javaResource.dedicatedResources().applyValue({ args0 ->
args0.map({ args0 ->
args0.let({ args0 -> toKotlin(args0) })
}).orElse(null)
})
/**
* The resource name of the deployment resource pool. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`.
* - - -
*/
public val name: Output<String>
get() = javaResource.name().applyValue({ args0 -> args0 })
/**
* The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
*/
public val project: Output<String>
get() = javaResource.project().applyValue({ args0 -> args0 })
/**
* The region of the deployment resource pool, e.g. `us-central1`.
*/
public val region: Output<String>?
get() = javaResource.region().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })
}
public object AiDeploymentResourcePoolMapper : ResourceMapper<AiDeploymentResourcePool> {
override fun supportsMappingOfType(javaResource: Resource): Boolean =
com.pulumi.gcp.vertex.AiDeploymentResourcePool::class == javaResource::class
override fun map(javaResource: Resource): AiDeploymentResourcePool =
AiDeploymentResourcePool(javaResource as com.pulumi.gcp.vertex.AiDeploymentResourcePool)
}
/**
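* A minimal usage sketch; the `region` setter is assumed from the generated [AiDeploymentResourcePoolArgsBuilder], and the full set of properties is shown in the examples on [AiDeploymentResourcePool].
* ```kotlin
* // Inside a running Pulumi program's coroutine context.
* val pool = aiDeploymentResourcePool("example-pool") {
*     args {
*         region("us-central1")
*     }
* }
* ```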
* @see [AiDeploymentResourcePool].
* @param name The _unique_ name of the resulting resource.
* @param block Builder for [AiDeploymentResourcePool].
*/
public suspend fun aiDeploymentResourcePool(
name: String,
block: suspend AiDeploymentResourcePoolResourceBuilder.() -> Unit,
): AiDeploymentResourcePool {
val builder = AiDeploymentResourcePoolResourceBuilder()
builder.name(name)
block(builder)
return builder.build()
}
/**
* @see [AiDeploymentResourcePool].
* @param name The _unique_ name of the resulting resource.
*/
public fun aiDeploymentResourcePool(name: String): AiDeploymentResourcePool {
val builder = AiDeploymentResourcePoolResourceBuilder()
builder.name(name)
return builder.build()
}