All downloads are free. Search and download functionality uses the official Maven repository.

commonMain.aws.sdk.kotlin.services.batch.BatchClient.kt Maven / Gradle / Ivy

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.batch

import aws.sdk.kotlin.runtime.auth.credentials.DefaultChainCredentialsProvider
import aws.sdk.kotlin.runtime.auth.credentials.internal.manage
import aws.sdk.kotlin.runtime.client.AwsSdkClientConfig
import aws.sdk.kotlin.runtime.config.AbstractAwsSdkClientFactory
import aws.sdk.kotlin.runtime.config.endpoints.resolveEndpointUrl
import aws.sdk.kotlin.runtime.config.profile.AwsProfile
import aws.sdk.kotlin.runtime.config.profile.AwsSharedConfig
import aws.sdk.kotlin.runtime.http.retries.AwsRetryPolicy
import aws.sdk.kotlin.services.batch.auth.BatchAuthSchemeProvider
import aws.sdk.kotlin.services.batch.auth.DefaultBatchAuthSchemeProvider
import aws.sdk.kotlin.services.batch.endpoints.BatchEndpointParameters
import aws.sdk.kotlin.services.batch.endpoints.BatchEndpointProvider
import aws.sdk.kotlin.services.batch.endpoints.DefaultBatchEndpointProvider
import aws.sdk.kotlin.services.batch.model.CancelJobRequest
import aws.sdk.kotlin.services.batch.model.CancelJobResponse
import aws.sdk.kotlin.services.batch.model.CreateComputeEnvironmentRequest
import aws.sdk.kotlin.services.batch.model.CreateComputeEnvironmentResponse
import aws.sdk.kotlin.services.batch.model.CreateJobQueueRequest
import aws.sdk.kotlin.services.batch.model.CreateJobQueueResponse
import aws.sdk.kotlin.services.batch.model.CreateSchedulingPolicyRequest
import aws.sdk.kotlin.services.batch.model.CreateSchedulingPolicyResponse
import aws.sdk.kotlin.services.batch.model.DeleteComputeEnvironmentRequest
import aws.sdk.kotlin.services.batch.model.DeleteComputeEnvironmentResponse
import aws.sdk.kotlin.services.batch.model.DeleteJobQueueRequest
import aws.sdk.kotlin.services.batch.model.DeleteJobQueueResponse
import aws.sdk.kotlin.services.batch.model.DeleteSchedulingPolicyRequest
import aws.sdk.kotlin.services.batch.model.DeleteSchedulingPolicyResponse
import aws.sdk.kotlin.services.batch.model.DeregisterJobDefinitionRequest
import aws.sdk.kotlin.services.batch.model.DeregisterJobDefinitionResponse
import aws.sdk.kotlin.services.batch.model.DescribeComputeEnvironmentsRequest
import aws.sdk.kotlin.services.batch.model.DescribeComputeEnvironmentsResponse
import aws.sdk.kotlin.services.batch.model.DescribeJobDefinitionsRequest
import aws.sdk.kotlin.services.batch.model.DescribeJobDefinitionsResponse
import aws.sdk.kotlin.services.batch.model.DescribeJobQueuesRequest
import aws.sdk.kotlin.services.batch.model.DescribeJobQueuesResponse
import aws.sdk.kotlin.services.batch.model.DescribeJobsRequest
import aws.sdk.kotlin.services.batch.model.DescribeJobsResponse
import aws.sdk.kotlin.services.batch.model.DescribeSchedulingPoliciesRequest
import aws.sdk.kotlin.services.batch.model.DescribeSchedulingPoliciesResponse
import aws.sdk.kotlin.services.batch.model.ListJobsRequest
import aws.sdk.kotlin.services.batch.model.ListJobsResponse
import aws.sdk.kotlin.services.batch.model.ListSchedulingPoliciesRequest
import aws.sdk.kotlin.services.batch.model.ListSchedulingPoliciesResponse
import aws.sdk.kotlin.services.batch.model.ListTagsForResourceRequest
import aws.sdk.kotlin.services.batch.model.ListTagsForResourceResponse
import aws.sdk.kotlin.services.batch.model.RegisterJobDefinitionRequest
import aws.sdk.kotlin.services.batch.model.RegisterJobDefinitionResponse
import aws.sdk.kotlin.services.batch.model.SubmitJobRequest
import aws.sdk.kotlin.services.batch.model.SubmitJobResponse
import aws.sdk.kotlin.services.batch.model.TagResourceRequest
import aws.sdk.kotlin.services.batch.model.TagResourceResponse
import aws.sdk.kotlin.services.batch.model.TerminateJobRequest
import aws.sdk.kotlin.services.batch.model.TerminateJobResponse
import aws.sdk.kotlin.services.batch.model.UntagResourceRequest
import aws.sdk.kotlin.services.batch.model.UntagResourceResponse
import aws.sdk.kotlin.services.batch.model.UpdateComputeEnvironmentRequest
import aws.sdk.kotlin.services.batch.model.UpdateComputeEnvironmentResponse
import aws.sdk.kotlin.services.batch.model.UpdateJobQueueRequest
import aws.sdk.kotlin.services.batch.model.UpdateJobQueueResponse
import aws.sdk.kotlin.services.batch.model.UpdateSchedulingPolicyRequest
import aws.sdk.kotlin.services.batch.model.UpdateSchedulingPolicyResponse
import aws.smithy.kotlin.runtime.auth.awscredentials.CredentialsProvider
import aws.smithy.kotlin.runtime.auth.awscredentials.CredentialsProviderConfig
import aws.smithy.kotlin.runtime.awsprotocol.ClockSkewInterceptor
import aws.smithy.kotlin.runtime.client.AbstractSdkClientBuilder
import aws.smithy.kotlin.runtime.client.LogMode
import aws.smithy.kotlin.runtime.client.RetryClientConfig
import aws.smithy.kotlin.runtime.client.RetryStrategyClientConfig
import aws.smithy.kotlin.runtime.client.RetryStrategyClientConfigImpl
import aws.smithy.kotlin.runtime.client.SdkClient
import aws.smithy.kotlin.runtime.client.SdkClientConfig
import aws.smithy.kotlin.runtime.client.SdkClientFactory
import aws.smithy.kotlin.runtime.http.auth.AuthScheme
import aws.smithy.kotlin.runtime.http.auth.HttpAuthConfig
import aws.smithy.kotlin.runtime.http.config.HttpClientConfig
import aws.smithy.kotlin.runtime.http.config.HttpEngineConfig
import aws.smithy.kotlin.runtime.http.engine.HttpClientEngine
import aws.smithy.kotlin.runtime.http.engine.HttpEngineConfigImpl
import aws.smithy.kotlin.runtime.http.interceptors.HttpInterceptor
import aws.smithy.kotlin.runtime.net.url.Url
import aws.smithy.kotlin.runtime.retries.RetryStrategy
import aws.smithy.kotlin.runtime.retries.policy.RetryPolicy
import aws.smithy.kotlin.runtime.telemetry.Global
import aws.smithy.kotlin.runtime.telemetry.TelemetryConfig
import aws.smithy.kotlin.runtime.telemetry.TelemetryProvider
import aws.smithy.kotlin.runtime.util.LazyAsyncValue
import kotlin.collections.List
import kotlin.jvm.JvmStatic


// Smithy service identifier for AWS Batch; presumably used by auth-scheme and endpoint resolution — confirm against runtime usage.
public const val ServiceId: String = "Batch"
// Version of this generated AWS SDK for Kotlin service artifact.
public const val SdkVersion: String = "1.0.14"

/**
 * # Batch
 * Using Batch, you can run batch computing workloads on the Amazon Web Services Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of the batch computing to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. You can use Batch to efficiently provision resources, and work toward eliminating capacity constraints, reducing your overall compute costs, and delivering results more quickly.
 *
 * As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus on analyzing results and solving your specific problems instead.
 */
public interface BatchClient : SdkClient {
    /**
     * BatchClient's configuration
     */
    public override val config: Config

    public companion object : AbstractAwsSdkClientFactory<Config, Config.Builder, BatchClient, Builder>() {
        @JvmStatic
        override fun builder(): Builder = Builder()

        override suspend fun finalizeConfig(builder: Builder, sharedConfig: LazyAsyncValue<AwsSharedConfig>, activeProfile: LazyAsyncValue<AwsProfile>) {
            // Respect an explicitly-configured endpoint URL; otherwise resolve it from
            // the environment / shared config using the service's naming triplet.
            builder.config.endpointUrl = builder.config.endpointUrl ?: resolveEndpointUrl(
                sharedConfig,
                "Batch",
                "BATCH",
                "batch",
            )
            // Clock-skew correction must run before other interceptors, hence index 0.
            builder.config.interceptors.add(0, ClockSkewInterceptor())
        }
    }

    public class Builder internal constructor(): AbstractSdkClientBuilder<Config, Config.Builder, BatchClient>() {
        override val config: Config.Builder = Config.Builder()
        override fun newClient(config: Config): BatchClient = DefaultBatchClient(config)
    }

    public class Config private constructor(builder: Builder) : AwsSdkClientConfig, CredentialsProviderConfig, HttpAuthConfig, HttpClientConfig, HttpEngineConfig by builder.buildHttpEngineConfig(), RetryClientConfig, RetryStrategyClientConfig by builder.buildRetryStrategyClientConfig(), SdkClientConfig, TelemetryConfig {
        override val clientName: String = builder.clientName
        override val region: String? = builder.region
        override val authSchemes: kotlin.collections.List<AuthScheme> = builder.authSchemes
        // The default chain provider is `manage()`d so the client closes it; a
        // caller-supplied provider is left for the caller to manage.
        override val credentialsProvider: CredentialsProvider = builder.credentialsProvider ?: DefaultChainCredentialsProvider(httpClient = httpClient, region = region).manage()
        public val endpointProvider: BatchEndpointProvider = builder.endpointProvider ?: DefaultBatchEndpointProvider()
        public val endpointUrl: Url? = builder.endpointUrl
        override val interceptors: kotlin.collections.List<HttpInterceptor> = builder.interceptors
        override val logMode: LogMode = builder.logMode ?: LogMode.Default
        override val retryPolicy: RetryPolicy<Any?> = builder.retryPolicy ?: AwsRetryPolicy.Default
        override val telemetryProvider: TelemetryProvider = builder.telemetryProvider ?: TelemetryProvider.Global
        override val useDualStack: Boolean = builder.useDualStack ?: false
        override val useFips: Boolean = builder.useFips ?: false
        override val applicationId: String? = builder.applicationId
        public val authSchemeProvider: BatchAuthSchemeProvider = builder.authSchemeProvider ?: DefaultBatchAuthSchemeProvider()
        public companion object {
            public inline operator fun invoke(block: Builder.() -> kotlin.Unit): Config = Builder().apply(block).build()
        }

        /**
         * Create a [Builder] pre-populated with this config's values, suitable for
         * scoped overrides (see [withConfig]).
         */
        public fun toBuilder(): Builder = Builder().apply {
            clientName = this@Config.clientName
            region = this@Config.region
            authSchemes = this@Config.authSchemes
            credentialsProvider = this@Config.credentialsProvider
            endpointProvider = this@Config.endpointProvider
            endpointUrl = this@Config.endpointUrl
            httpClient = this@Config.httpClient
            // Copy so mutations of the new builder don't affect this config.
            interceptors = this@Config.interceptors.toMutableList()
            logMode = this@Config.logMode
            retryPolicy = this@Config.retryPolicy
            retryStrategy = this@Config.retryStrategy
            telemetryProvider = this@Config.telemetryProvider
            useDualStack = this@Config.useDualStack
            useFips = this@Config.useFips
            applicationId = this@Config.applicationId
            authSchemeProvider = this@Config.authSchemeProvider
        }

        public class Builder : AwsSdkClientConfig.Builder, CredentialsProviderConfig.Builder, HttpAuthConfig.Builder, HttpClientConfig.Builder, HttpEngineConfig.Builder by HttpEngineConfigImpl.BuilderImpl(), RetryClientConfig.Builder, RetryStrategyClientConfig.Builder by RetryStrategyClientConfigImpl.BuilderImpl(), SdkClientConfig.Builder, TelemetryConfig.Builder {
            /**
             * A reader-friendly name for the client.
             */
            override var clientName: String = "Batch"

            /**
             * The AWS region (e.g. `us-west-2`) to make requests to. See about AWS
             * [global infrastructure](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) for more
             * information
             */
            override var region: String? = null

            /**
             * Register new or override default [AuthScheme]s configured for this client. By default, the set
             * of auth schemes configured comes from the service model. An auth scheme configured explicitly takes
             * precedence over the defaults and can be used to customize identity resolution and signing for specific
             * authentication schemes.
             */
            override var authSchemes: kotlin.collections.List<AuthScheme> = emptyList()

            /**
             * The AWS credentials provider to use for authenticating requests. If not provided a
             * [aws.sdk.kotlin.runtime.auth.credentials.DefaultChainCredentialsProvider] instance will be used.
             * NOTE: The caller is responsible for managing the lifetime of the provider when set. The SDK
             * client will not close it when the client is closed.
             */
            override var credentialsProvider: CredentialsProvider? = null

            /**
             * The endpoint provider used to determine where to make service requests. **This is an advanced config
             * option.**
             *
             * Endpoint resolution occurs as part of the workflow for every request made via the service client.
             *
             * The inputs to endpoint resolution are defined on a per-service basis (see [EndpointParameters]).
             */
            public var endpointProvider: BatchEndpointProvider? = null

            /**
             * A custom endpoint to route requests to. The endpoint set here is passed to the configured
             * [endpointProvider], which may inspect and modify it as needed.
             *
             * Setting a custom endpointUrl should generally be preferred to overriding the [endpointProvider] and is
             * the recommended way to route requests to development or preview instances of a service.
             *
             * **This is an advanced config option.**
             */
            public var endpointUrl: Url? = null

            /**
             * Add an [aws.smithy.kotlin.runtime.client.Interceptor] that will have access to read and modify
             * the request and response objects as they are processed by the SDK.
             * Interceptors added using this method are executed in the order they are configured and are always
             * later than any added automatically by the SDK.
             */
            override var interceptors: kotlin.collections.MutableList<HttpInterceptor> = kotlin.collections.mutableListOf()

            /**
             * Configure events that will be logged. By default clients will not output
             * raw requests or responses. Use this setting to opt-in to additional debug logging.
             *
             * This can be used to configure logging of requests, responses, retries, etc of SDK clients.
             *
             * **NOTE**: Logging of raw requests or responses may leak sensitive information! It may also have
             * performance considerations when dumping the request/response body. This is primarily a tool for
             * debug purposes.
             */
            override var logMode: LogMode? = null

            /**
             * The policy to use for evaluating operation results and determining whether/how to retry.
             */
            override var retryPolicy: RetryPolicy<Any?>? = null

            /**
             * The telemetry provider used to instrument the SDK operations with. By default, the global telemetry
             * provider will be used.
             */
            override var telemetryProvider: TelemetryProvider? = null

            /**
             * Flag to toggle whether to use dual-stack endpoints when making requests.
             * See [https://docs.aws.amazon.com/sdkref/latest/guide/feature-endpoints.html] for more information.
             * Disabled by default.
             */
            override var useDualStack: Boolean? = null

            /**
             * Flag to toggle whether to use [FIPS](https://aws.amazon.com/compliance/fips/) endpoints when making requests.
             * Disabled by default.
             */
            override var useFips: Boolean? = null

            /**
             * An optional application specific identifier.
             * When set it will be appended to the User-Agent header of every request in the form of: `app/{applicationId}`.
             * When not explicitly set, the value will be loaded from the following locations:
             *
             * - JVM System Property: `aws.userAgentAppId`
             * - Environment variable: `AWS_SDK_UA_APP_ID`
             * - Shared configuration profile attribute: `sdk_ua_app_id`
             *
             * See [shared configuration settings](https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html)
             * reference for more information on environment variables and shared config settings.
             */
            override var applicationId: String? = null

            /**
             * Configure the provider used to resolve the authentication scheme to use for a particular operation.
             */
            public var authSchemeProvider: BatchAuthSchemeProvider? = null

            override fun build(): Config = Config(this)
        }
    }

    /**
     * Cancels a job in an Batch job queue. Jobs that are in the `SUBMITTED` or `PENDING` are canceled. A job in `RUNNABLE` remains in `RUNNABLE` until it reaches the head of the job queue. Then the job status is updated to `FAILED`.
     *
     * A `PENDING` job is canceled after all dependency jobs are completed. Therefore, it may take longer than expected to cancel a job in `PENDING` status.
     *
     * When you try to cancel an array parent job in `PENDING`, Batch attempts to cancel all child jobs. The array parent job is canceled when all child jobs are completed.
     *
     * Jobs that progressed to the `STARTING` or `RUNNING` state aren't canceled. However, the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.
     */
    public suspend fun cancelJob(input: CancelJobRequest): CancelJobResponse

    /**
     * Creates an Batch compute environment. You can create `MANAGED` or `UNMANAGED` compute environments. `MANAGED` compute environments can use Amazon EC2 or Fargate resources. `UNMANAGED` compute environments can only use EC2 resources.
     *
     * In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the [launch template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.
     *
     * Multi-node parallel jobs aren't supported on Spot Instances.
     *
     * In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see [container instance AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_instance_AMIs.html) in the *Amazon Elastic Container Service Developer Guide*. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see [Launching an Amazon ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html) in the *Amazon Elastic Container Service Developer Guide*.
     *
     * To create a compute environment that uses EKS resources, the caller must have permissions to call `eks:DescribeCluster`.
     *
     * Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps:
     * + Create a new compute environment with the new AMI.
     * + Add the compute environment to an existing job queue.
     * + Remove the earlier compute environment from your job queue.
     * + Delete the earlier compute environment.
     * In April 2022, Batch added enhanced support for updating compute environments. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html). To use the enhanced updating of compute environments to update AMIs, follow these rules:
     * + Either don't set the service role (`serviceRole`) parameter or set it to the **AWSBatchServiceRole** service-linked role.
     * + Set the allocation strategy (`allocationStrategy`) parameter to `BEST_FIT_PROGRESSIVE`, `SPOT_CAPACITY_OPTIMIZED`, or `SPOT_PRICE_CAPACITY_OPTIMIZED`.
     * + Set the update to latest image version (`updateToLatestImageVersion`) parameter to `true`. The `updateToLatestImageVersion` parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment.
     * + Don't specify an AMI ID in `imageId`, `imageIdOverride` (in [`ec2Configuration`](https://docs.aws.amazon.com/batch/latest/APIReference/API_Ec2Configuration.html)), or in the launch template (`launchTemplate`). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the `imageId` or `imageIdOverride` parameters, or the launch template identified by the `LaunchTemplate` properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the `imageId` or `imageIdOverride` parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to `$Default` or `$Latest`, by setting either a new default version for the launch template (if `$Default`) or by adding a new version to the launch template (if `$Latest`).
     * If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the `version` setting in the launch template (`launchTemplate`) is set to `$Latest` or `$Default`, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the `launchTemplate` wasn't updated.
     */
    public suspend fun createComputeEnvironment(input: CreateComputeEnvironmentRequest): CreateComputeEnvironmentResponse

    /**
     * Creates an Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.
     *
     * You also set a priority to the job queue that determines the order that the Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.
     */
    public suspend fun createJobQueue(input: CreateJobQueueRequest): CreateJobQueueResponse

    /**
     * Creates an Batch scheduling policy.
     */
    public suspend fun createSchedulingPolicy(input: CreateSchedulingPolicyRequest): CreateSchedulingPolicyResponse

    /**
     * Deletes an Batch compute environment.
     *
     * Before you can delete a compute environment, you must set its state to `DISABLED` with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.
     */
    public suspend fun deleteComputeEnvironment(input: DeleteComputeEnvironmentRequest): DeleteComputeEnvironmentResponse

    /**
     * Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are eventually terminated when you delete a job queue. The jobs are terminated at a rate of about 16 jobs each second.
     *
     * It's not necessary to disassociate compute environments from a queue before submitting a `DeleteJobQueue` request.
     */
    public suspend fun deleteJobQueue(input: DeleteJobQueueRequest): DeleteJobQueueResponse

    /**
     * Deletes the specified scheduling policy.
     *
     * You can't delete a scheduling policy that's used in any job queues.
     */
    public suspend fun deleteSchedulingPolicy(input: DeleteSchedulingPolicyRequest): DeleteSchedulingPolicyResponse

    /**
     * Deregisters an Batch job definition. Job definitions are permanently deleted after 180 days.
     */
    public suspend fun deregisterJobDefinition(input: DeregisterJobDefinitionRequest): DeregisterJobDefinitionResponse

    /**
     * Describes one or more of your compute environments.
     *
     * If you're using an unmanaged compute environment, you can use the `DescribeComputeEnvironment` operation to determine the `ecsClusterArn` that you launch your Amazon ECS container instances into.
     */
    public suspend fun describeComputeEnvironments(input: DescribeComputeEnvironmentsRequest = DescribeComputeEnvironmentsRequest { }): DescribeComputeEnvironmentsResponse

    /**
     * Describes a list of job definitions. You can specify a `status` (such as `ACTIVE`) to only return job definitions that match that status.
     */
    public suspend fun describeJobDefinitions(input: DescribeJobDefinitionsRequest = DescribeJobDefinitionsRequest { }): DescribeJobDefinitionsResponse

    /**
     * Describes one or more of your job queues.
     */
    public suspend fun describeJobQueues(input: DescribeJobQueuesRequest = DescribeJobQueuesRequest { }): DescribeJobQueuesResponse

    /**
     * Describes a list of Batch jobs.
     */
    public suspend fun describeJobs(input: DescribeJobsRequest): DescribeJobsResponse

    /**
     * Describes one or more of your scheduling policies.
     */
    public suspend fun describeSchedulingPolicies(input: DescribeSchedulingPoliciesRequest): DescribeSchedulingPoliciesResponse

    /**
     * Returns a list of Batch jobs.
     *
     * You must specify only one of the following items:
     * + A job queue ID to return a list of jobs in that job queue
     * + A multi-node parallel job ID to return a list of nodes for that job
     * + An array job ID to return a list of the children for that job
     *
     * You can filter the results by job status with the `jobStatus` parameter. If you don't specify a status, only `RUNNING` jobs are returned.
     */
    public suspend fun listJobs(input: ListJobsRequest = ListJobsRequest { }): ListJobsResponse

    /**
     * Returns a list of Batch scheduling policies.
     */
    public suspend fun listSchedulingPolicies(input: ListSchedulingPoliciesRequest = ListSchedulingPoliciesRequest { }): ListSchedulingPoliciesResponse

    /**
     * Lists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs aren't supported.
     */
    public suspend fun listTagsForResource(input: ListTagsForResourceRequest): ListTagsForResourceResponse

    /**
     * Registers an Batch job definition.
     */
    public suspend fun registerJobDefinition(input: RegisterJobDefinitionRequest): RegisterJobDefinitionResponse

    /**
     * Submits an Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the `resourceRequirements` objects in the job definition are the exception. They can't be overridden this way using the `memory` and `vcpus` parameters. Rather, you must specify updates to job definition parameters in a `resourceRequirements` object that's included in the `containerOverrides` parameter.
     *
     * Job queues with a scheduling policy are limited to 500 active fair share identifiers at a time.
     *
     * Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and job might be terminated.
     */
    public suspend fun submitJob(input: SubmitJobRequest): SubmitJobResponse

    /**
     * Associates the specified tags to a resource with the specified `resourceArn`. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs aren't supported.
     */
    public suspend fun tagResource(input: TagResourceRequest): TagResourceResponse

    /**
     * Terminates a job in a job queue. Jobs that are in the `STARTING` or `RUNNING` state are terminated, which causes them to transition to `FAILED`. Jobs that have not progressed to the `STARTING` state are cancelled.
     */
    public suspend fun terminateJob(input: TerminateJobRequest): TerminateJobResponse

    /**
     * Deletes specified tags from an Batch resource.
     */
    public suspend fun untagResource(input: UntagResourceRequest): UntagResourceResponse

    /**
     * Updates an Batch compute environment.
     */
    public suspend fun updateComputeEnvironment(input: UpdateComputeEnvironmentRequest): UpdateComputeEnvironmentResponse

    /**
     * Updates a job queue.
     */
    public suspend fun updateJobQueue(input: UpdateJobQueueRequest): UpdateJobQueueResponse

    /**
     * Updates a scheduling policy.
     */
    public suspend fun updateSchedulingPolicy(input: UpdateSchedulingPolicyRequest): UpdateSchedulingPolicyResponse
}

/**
 * Returns a new [BatchClient] whose configuration is this client's configuration with
 * the overrides in [block] applied — useful for scoped, per-operation config changes.
 *
 * Any resources created on your behalf will be shared between clients, and will only be closed when ALL clients using them are closed.
 * If you provide a resource (e.g. [HttpClientEngine]) to the SDK, you are responsible for managing the lifetime of that resource.
 */
public fun BatchClient.withConfig(block: BatchClient.Config.Builder.() -> Unit): BatchClient =
    DefaultBatchClient(config.toBuilder().apply(block).build())

/**
 * Cancels a job in an Batch job queue. Jobs that are in the `SUBMITTED` or `PENDING` are canceled. A job in`RUNNABLE` remains in `RUNNABLE` until it reaches the head of the job queue. Then the job status is updated to `FAILED`.
 *
 * A `PENDING` job is canceled after all dependency jobs are completed. Therefore, it may take longer than expected to cancel a job in `PENDING` status.
 *
 * When you try to cancel an array parent job in `PENDING`, Batch attempts to cancel all child jobs. The array parent job is canceled when all child jobs are completed.
 *
 * Jobs that progressed to the `STARTING` or `RUNNING` state aren't canceled. However, the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.
 */
public suspend inline fun BatchClient.cancelJob(crossinline block: CancelJobRequest.Builder.() -> Unit): CancelJobResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = CancelJobRequest.Builder().apply(block).build()
    return cancelJob(request)
}

/**
 * Creates an Batch compute environment. You can create `MANAGED` or `UNMANAGED` compute environments. `MANAGED` compute environments can use Amazon EC2 or Fargate resources. `UNMANAGED` compute environments can only use EC2 resources.
 *
 * In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the [launch template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.
 *
 * Multi-node parallel jobs aren't supported on Spot Instances.
 *
 * In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see [container instance AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_instance_AMIs.html) in the *Amazon Elastic Container Service Developer Guide*. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see [Launching an Amazon ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html) in the *Amazon Elastic Container Service Developer Guide*.
 *
 * To create a compute environment that uses EKS resources, the caller must have permissions to call `eks:DescribeCluster`.
 *
 * Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps:
 * + Create a new compute environment with the new AMI.
 * + Add the compute environment to an existing job queue.
 * + Remove the earlier compute environment from your job queue.
 * + Delete the earlier compute environment.
 * In April 2022, Batch added enhanced support for updating compute environments. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html). To use the enhanced updating of compute environments to update AMIs, follow these rules:
 * + Either don't set the service role (`serviceRole`) parameter or set it to the **AWSBatchServiceRole** service-linked role.
 * + Set the allocation strategy (`allocationStrategy`) parameter to `BEST_FIT_PROGRESSIVE`, `SPOT_CAPACITY_OPTIMIZED`, or `SPOT_PRICE_CAPACITY_OPTIMIZED`.
 * + Set the update to latest image version (`updateToLatestImageVersion`) parameter to `true`. The `updateToLatestImageVersion` parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment.
 * + Don't specify an AMI ID in `imageId`, `imageIdOverride` (in `ec2Configuration`[](https://docs.aws.amazon.com/batch/latest/APIReference/API_Ec2Configuration.html)), or in the launch template (`launchTemplate`). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the `imageId` or `imageIdOverride` parameters, or the launch template identified by the `LaunchTemplate` properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the `imageId` or `imageIdOverride` parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to `$Default` or `$Latest`, by setting either a new default version for the launch template (if `$Default`) or by adding a new version to the launch template (if `$Latest`).
 * If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the `version` setting in the launch template (`launchTemplate`) is set to `$Latest` or `$Default`, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the `launchTemplate` wasn't updated.
 */
public suspend inline fun BatchClient.createComputeEnvironment(crossinline block: CreateComputeEnvironmentRequest.Builder.() -> Unit): CreateComputeEnvironmentResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = CreateComputeEnvironmentRequest.Builder().apply(block).build()
    return createComputeEnvironment(request)
}

/**
 * Creates an Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.
 *
 * You also set a priority to the job queue that determines the order that the Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.
 */
public suspend inline fun BatchClient.createJobQueue(crossinline block: CreateJobQueueRequest.Builder.() -> Unit): CreateJobQueueResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = CreateJobQueueRequest.Builder().apply(block).build()
    return createJobQueue(request)
}

/**
 * Creates a Batch scheduling policy.
 */
public suspend inline fun BatchClient.createSchedulingPolicy(crossinline block: CreateSchedulingPolicyRequest.Builder.() -> Unit): CreateSchedulingPolicyResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = CreateSchedulingPolicyRequest.Builder().apply(block).build()
    return createSchedulingPolicy(request)
}

/**
 * Deletes a Batch compute environment.
 *
 * Before you can delete a compute environment, you must set its state to `DISABLED` with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.
 */
public suspend inline fun BatchClient.deleteComputeEnvironment(crossinline block: DeleteComputeEnvironmentRequest.Builder.() -> Unit): DeleteComputeEnvironmentResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DeleteComputeEnvironmentRequest.Builder().apply(block).build()
    return deleteComputeEnvironment(request)
}

/**
 * Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are eventually terminated when you delete a job queue. The jobs are terminated at a rate of about 16 jobs each second.
 *
 * It's not necessary to disassociate compute environments from a queue before submitting a `DeleteJobQueue` request.
 */
public suspend inline fun BatchClient.deleteJobQueue(crossinline block: DeleteJobQueueRequest.Builder.() -> Unit): DeleteJobQueueResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DeleteJobQueueRequest.Builder().apply(block).build()
    return deleteJobQueue(request)
}

/**
 * Deletes the specified scheduling policy.
 *
 * You can't delete a scheduling policy that's used in any job queues.
 */
public suspend inline fun BatchClient.deleteSchedulingPolicy(crossinline block: DeleteSchedulingPolicyRequest.Builder.() -> Unit): DeleteSchedulingPolicyResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DeleteSchedulingPolicyRequest.Builder().apply(block).build()
    return deleteSchedulingPolicy(request)
}

/**
 * Deregisters a Batch job definition. Job definitions are permanently deleted after 180 days.
 */
public suspend inline fun BatchClient.deregisterJobDefinition(crossinline block: DeregisterJobDefinitionRequest.Builder.() -> Unit): DeregisterJobDefinitionResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DeregisterJobDefinitionRequest.Builder().apply(block).build()
    return deregisterJobDefinition(request)
}

/**
 * Describes one or more of your compute environments.
 *
 * If you're using an unmanaged compute environment, you can use the `DescribeComputeEnvironments` operation to determine the `ecsClusterArn` that you launch your Amazon ECS container instances into.
 */
public suspend inline fun BatchClient.describeComputeEnvironments(crossinline block: DescribeComputeEnvironmentsRequest.Builder.() -> Unit): DescribeComputeEnvironmentsResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DescribeComputeEnvironmentsRequest.Builder().apply(block).build()
    return describeComputeEnvironments(request)
}

/**
 * Describes a list of job definitions. You can specify a `status` (such as `ACTIVE`) to only return job definitions that match that status.
 */
public suspend inline fun BatchClient.describeJobDefinitions(crossinline block: DescribeJobDefinitionsRequest.Builder.() -> Unit): DescribeJobDefinitionsResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DescribeJobDefinitionsRequest.Builder().apply(block).build()
    return describeJobDefinitions(request)
}

/**
 * Describes one or more of your job queues.
 */
public suspend inline fun BatchClient.describeJobQueues(crossinline block: DescribeJobQueuesRequest.Builder.() -> Unit): DescribeJobQueuesResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DescribeJobQueuesRequest.Builder().apply(block).build()
    return describeJobQueues(request)
}

/**
 * Describes a list of Batch jobs.
 */
public suspend inline fun BatchClient.describeJobs(crossinline block: DescribeJobsRequest.Builder.() -> Unit): DescribeJobsResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DescribeJobsRequest.Builder().apply(block).build()
    return describeJobs(request)
}

/**
 * Describes one or more of your scheduling policies.
 */
public suspend inline fun BatchClient.describeSchedulingPolicies(crossinline block: DescribeSchedulingPoliciesRequest.Builder.() -> Unit): DescribeSchedulingPoliciesResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = DescribeSchedulingPoliciesRequest.Builder().apply(block).build()
    return describeSchedulingPolicies(request)
}

/**
 * Returns a list of Batch jobs.
 *
 * You must specify only one of the following items:
 * + A job queue ID to return a list of jobs in that job queue
 * + A multi-node parallel job ID to return a list of nodes for that job
 * + An array job ID to return a list of the children for that job
 *
 * You can filter the results by job status with the `jobStatus` parameter. If you don't specify a status, only `RUNNING` jobs are returned.
 */
public suspend inline fun BatchClient.listJobs(crossinline block: ListJobsRequest.Builder.() -> Unit): ListJobsResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = ListJobsRequest.Builder().apply(block).build()
    return listJobs(request)
}

/**
 * Returns a list of Batch scheduling policies.
 */
public suspend inline fun BatchClient.listSchedulingPolicies(crossinline block: ListSchedulingPoliciesRequest.Builder.() -> Unit): ListSchedulingPoliciesResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = ListSchedulingPoliciesRequest.Builder().apply(block).build()
    return listSchedulingPolicies(request)
}

/**
 * Lists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs aren't supported.
 */
public suspend inline fun BatchClient.listTagsForResource(crossinline block: ListTagsForResourceRequest.Builder.() -> Unit): ListTagsForResourceResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = ListTagsForResourceRequest.Builder().apply(block).build()
    return listTagsForResource(request)
}

/**
 * Registers a Batch job definition.
 */
public suspend inline fun BatchClient.registerJobDefinition(crossinline block: RegisterJobDefinitionRequest.Builder.() -> Unit): RegisterJobDefinitionResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = RegisterJobDefinitionRequest.Builder().apply(block).build()
    return registerJobDefinition(request)
}

/**
 * Submits an Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the `resourceRequirements` objects in the job definition are the exception. They can't be overridden this way using the `memory` and `vcpus` parameters. Rather, you must specify updates to job definition parameters in a `resourceRequirements` object that's included in the `containerOverrides` parameter.
 *
 * Job queues with a scheduling policy are limited to 500 active fair share identifiers at a time.
 *
 * Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and job might be terminated.
 */
public suspend inline fun BatchClient.submitJob(crossinline block: SubmitJobRequest.Builder.() -> Unit): SubmitJobResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = SubmitJobRequest.Builder().apply(block).build()
    return submitJob(request)
}

/**
 * Associates the specified tags to a resource with the specified `resourceArn`. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs aren't supported.
 */
public suspend inline fun BatchClient.tagResource(crossinline block: TagResourceRequest.Builder.() -> Unit): TagResourceResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = TagResourceRequest.Builder().apply(block).build()
    return tagResource(request)
}

/**
 * Terminates a job in a job queue. Jobs that are in the `STARTING` or `RUNNING` state are terminated, which causes them to transition to `FAILED`. Jobs that have not progressed to the `STARTING` state are cancelled.
 */
public suspend inline fun BatchClient.terminateJob(crossinline block: TerminateJobRequest.Builder.() -> Unit): TerminateJobResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = TerminateJobRequest.Builder().apply(block).build()
    return terminateJob(request)
}

/**
 * Deletes specified tags from a Batch resource.
 */
public suspend inline fun BatchClient.untagResource(crossinline block: UntagResourceRequest.Builder.() -> Unit): UntagResourceResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = UntagResourceRequest.Builder().apply(block).build()
    return untagResource(request)
}

/**
 * Updates a Batch compute environment.
 */
public suspend inline fun BatchClient.updateComputeEnvironment(crossinline block: UpdateComputeEnvironmentRequest.Builder.() -> Unit): UpdateComputeEnvironmentResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = UpdateComputeEnvironmentRequest.Builder().apply(block).build()
    return updateComputeEnvironment(request)
}

/**
 * Updates a job queue.
 */
public suspend inline fun BatchClient.updateJobQueue(crossinline block: UpdateJobQueueRequest.Builder.() -> Unit): UpdateJobQueueResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = UpdateJobQueueRequest.Builder().apply(block).build()
    return updateJobQueue(request)
}

/**
 * Updates a scheduling policy.
 */
public suspend inline fun BatchClient.updateSchedulingPolicy(crossinline block: UpdateSchedulingPolicyRequest.Builder.() -> Unit): UpdateSchedulingPolicyResponse {
    // Materialize the DSL block into a concrete request, then delegate to the client operation.
    val request = UpdateSchedulingPolicyRequest.Builder().apply(block).build()
    return updateSchedulingPolicy(request)
}




// © 2015 - 2025 Weber Informatics LLC | Privacy Policy