/*
* Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.ecs.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateServiceRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
/**
*
* The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not
* specify a cluster, the default cluster is assumed.
*
*/
private String cluster;
/**
*
* The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service
* names must be unique within a cluster, but you can have similarly named services in multiple clusters within a
* Region or across multiple Regions.
*
*/
private String serviceName;
/**
*
* The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the task
* definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
*
*/
private String taskDefinition;
/**
*
* A load balancer object representing the load balancers to use with your service. For more information, see Service Load
* Balancing in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the
* service. The service-linked role is required for services that make use of multiple target groups. For more
* information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing production traffic
* to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or target
* group ARN, container name, and container port specified in the service definition are immutable. If you are
* using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when updating the
* service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target
* group ARN, the container name (as it appears in a container definition), and the container port to access from
* the load balancer. When a task from this service is placed on a container instance, the container instance and
* port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in
* a container definition), and the container port to access from the load balancer. When a task from this service
* is placed on a container instance, the container instance is registered with the load balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*
*/
private com.amazonaws.internal.SdkInternalList<LoadBalancer> loadBalancers;
/**
*
* The details of the service discovery registries to assign to this service. For more information, see Service Discovery.
*
*
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more
* information, see AWS
* Fargate Platform Versions.
*
*
*/
private com.amazonaws.internal.SdkInternalList<ServiceRegistry> serviceRegistries;
/**
*
* The number of instantiations of the specified task definition to place and keep running on your cluster.
*
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
*
*/
private Integer desiredCount;
/**
*
* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII
* characters are allowed.
*
*/
private String clientToken;
/**
*
* The launch type on which to run your service. For more information, see Amazon ECS Launch Types
* in the Amazon Elastic Container Service Developer Guide.
*
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be omitted.
*
*/
private String launchType;
/**
*
* The capacity provider strategy to use for the service.
*
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code> and
* <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be used in
* a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider
* with a cluster. Only capacity providers with an <code>ACTIVE</code> or <code>UPDATING</code> status can be
* used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be
* omitted. If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created.
* New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or <code>FARGATE_SPOT</code>
* capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be
* associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity providers
* for a cluster after the cluster is created.
*
*/
private com.amazonaws.internal.SdkInternalList<CapacityProviderStrategyItem> capacityProviderStrategy;
/**
*
* The platform version that your tasks in the service are running on. A platform version is specified only for
* tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version is used by
* default. For more information, see AWS Fargate Platform
* Versions in the Amazon Elastic Container Service Developer Guide.
*
*/
private String platformVersion;
/**
*
* The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load
* balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and
* your task definition does not use the <code>awsvpc</code> network mode. If you specify the <code>role</code>
* parameter, you must also specify a load balancer object with the <code>loadBalancers</code> parameter.
*
*
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for your
* service unless you specify a role here. The service-linked role is required if your task definition uses the
* <code>awsvpc</code> network mode or if the service is configured to use service discovery, an external deployment
* controller, multiple target groups, or Elastic Inference accelerators in which case you should not specify a role
* here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role ARN
* (this is recommended) or prefix the role name with the path. For example, if a role with the name
* <code>bar</code> has a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the role
* name. For more information, see Friendly Names and Paths in the IAM User Guide.
* see Friendly Names and Paths in the IAM User Guide.
*
*/
private String role;
/**
*
* Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping
* and starting tasks.
*
*/
private DeploymentConfiguration deploymentConfiguration;
/**
*
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at runtime).
*
*/
private com.amazonaws.internal.SdkInternalList<PlacementConstraint> placementConstraints;
/**
*
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five strategy rules
* per service.
*
*/
private com.amazonaws.internal.SdkInternalList<PlacementStrategy> placementStrategy;
/**
*
* The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported for
* other network modes. For more information, see Task Networking in
* the Amazon Elastic Container Service Developer Guide.
*
*/
private NetworkConfiguration networkConfiguration;
/**
*
* The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load
* Balancing target health checks after a task has first started. This is only used when your service is configured
* to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace
* period value, the default value of <code>0</code> is used.
*
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can
* specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
* scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as
* unhealthy and stopping them before they have time to come up.
*
*/
private Integer healthCheckGracePeriodSeconds;
/**
*
* The scheduling strategy to use for the service. For more information, see Services.
*
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code> - The replica scheduling strategy places and maintains the desired number of tasks
* across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use
* task placement strategies and constraints to customize task placement decisions. This scheduler strategy is
* required if the service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment controller
* types.
*
*
* -
*
* <code>DAEMON</code> - The daemon scheduling strategy deploys exactly one task on each active container
* instance that meets all of the task placement constraints that you specify in your cluster. The service
* scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not
* meet the placement constraints. When you're using this strategy, you don't need to specify a desired number
* of tasks, a task placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
*
*/
private String schedulingStrategy;
/**
*
* The deployment controller to use for the service.
*
*/
private DeploymentController deploymentController;
/**
*
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key
* and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.
*
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix
* for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with
* this prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*
*/
private com.amazonaws.internal.SdkInternalList<Tag> tags;
/**
*
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see
* Tagging Your Amazon ECS
* Resources in the Amazon Elastic Container Service Developer Guide.
*
*/
private Boolean enableECSManagedTags;
/**
*
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If
* no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service
* during service creation. To add tags to a task after service creation, use the TagResource API action.
*
*/
private String propagateTags;
/**
*
* The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not
* specify a cluster, the default cluster is assumed.
*
*
* @param cluster
* The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do
* not specify a cluster, the default cluster is assumed.
*/
public void setCluster(String cluster) {
this.cluster = cluster;
}
/**
*
* The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not
* specify a cluster, the default cluster is assumed.
*
*
* @return The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do
* not specify a cluster, the default cluster is assumed.
*/
public String getCluster() {
return this.cluster;
}
/**
*
* The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not
* specify a cluster, the default cluster is assumed.
*
*
* @param cluster
* The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do
* not specify a cluster, the default cluster is assumed.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withCluster(String cluster) {
setCluster(cluster);
return this;
}
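/*
* Illustrative sketch (not part of the generated SDK source): the fluent with* setters on this class can be
* chained to build a request. The cluster and service names below are hypothetical placeholders.
*
*     CreateServiceRequest request = new CreateServiceRequest()
*             .withCluster("my-cluster")
*             .withServiceName("my-web-service");
*/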
/**
*
* The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service
* names must be unique within a cluster, but you can have similarly named services in multiple clusters within a
* Region or across multiple Regions.
*
*
* @param serviceName
* The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed.
* Service names must be unique within a cluster, but you can have similarly named services in multiple
* clusters within a Region or across multiple Regions.
*/
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
/**
*
* The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service
* names must be unique within a cluster, but you can have similarly named services in multiple clusters within a
* Region or across multiple Regions.
*
*
* @return The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed.
* Service names must be unique within a cluster, but you can have similarly named services in multiple
* clusters within a Region or across multiple Regions.
*/
public String getServiceName() {
return this.serviceName;
}
/**
*
* The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service
* names must be unique within a cluster, but you can have similarly named services in multiple clusters within a
* Region or across multiple Regions.
*
*
* @param serviceName
* The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed.
* Service names must be unique within a cluster, but you can have similarly named services in multiple
* clusters within a Region or across multiple Regions.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withServiceName(String serviceName) {
setServiceName(serviceName);
return this;
}
/**
*
* The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the task
* definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
*
*
* @param taskDefinition
* The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the task
* definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
*/
public void setTaskDefinition(String taskDefinition) {
this.taskDefinition = taskDefinition;
}
/**
*
* The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the task
* definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
*
*
* @return The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the
* task definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
*/
public String getTaskDefinition() {
return this.taskDefinition;
}
/**
*
* The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the task
* definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
*
*
* @param taskDefinition
* The <code>family</code> and <code>revision</code> (<code>family:revision</code>) or full ARN of the task
* definition to run in your service. If a <code>revision</code> is not specified, the latest
* <code>ACTIVE</code> revision is used.
*
* A task definition must be specified if the service is using the <code>ECS</code> deployment controller.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withTaskDefinition(String taskDefinition) {
setTaskDefinition(taskDefinition);
return this;
}
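/*
* Illustrative sketch: as described above, the task definition can be referenced either as family:revision or
* by its full ARN. The family name, revision, and ARN below are hypothetical placeholders.
*
*     request.withTaskDefinition("my-task-family:3");
*     // or, equivalently, the full ARN form:
*     request.withTaskDefinition("arn:aws:ecs:us-east-1:123456789012:task-definition/my-task-family:3");
*/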
/**
*
* A load balancer object representing the load balancers to use with your service. For more information, see Service Load
* Balancing in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the
* service. The service-linked role is required for services that make use of multiple target groups. For more
* information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing production traffic
* to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or target
* group ARN, container name, and container port specified in the service definition are immutable. If you are
* using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when updating the
* service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target
* group ARN, the container name (as it appears in a container definition), and the container port to access from
* the load balancer. When a task from this service is placed on a container instance, the container instance and
* port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in
* a container definition), and the container port to access from the load balancer. When a task from this service
* is placed on a container instance, the container instance is registered with the load balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*
*
* @return A load balancer object representing the load balancers to use with your service. For more information,
* see Service
* Load Balancing in the Amazon Elastic Container Service Developer Guide.
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to
* the service. The service-linked role is required for services that make use of multiple target groups.
* For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing production traffic
* to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or
* target group ARN, container name, and container port specified in the service definition are immutable. If
* you are using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when
* updating the service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer
* target group ARN, the container name (as it appears in a container definition), and the container port to
* access from the load balancer. When a task from this service is placed on a container instance, the
* container instance and port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it
* appears in a container definition), and the container port to access from the load balancer. When a task
* from this service is placed on a container instance, the container instance is registered with the load
* balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*/
public java.util.List<LoadBalancer> getLoadBalancers() {
if (loadBalancers == null) {
loadBalancers = new com.amazonaws.internal.SdkInternalList<LoadBalancer>();
}
return loadBalancers;
}
/**
*
* A load balancer object representing the load balancers to use with your service. For more information, see Service Load
* Balancing in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the
* service. The service-linked role is required for services that make use of multiple target groups. For more
* information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing production traffic
* to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or target
* group ARN, container name, and container port specified in the service definition are immutable. If you are
* using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when updating the
* service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target
* group ARN, the container name (as it appears in a container definition), and the container port to access from
* the load balancer. When a task from this service is placed on a container instance, the container instance and
* port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in
* a container definition), and the container port to access from the load balancer. When a task from this service
* is placed on a container instance, the container instance is registered with the load balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*
*
* @param loadBalancers
* A load balancer object representing the load balancers to use with your service. For more information, see
* Service
* Load Balancing in the Amazon Elastic Container Service Developer Guide.
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to
* the service. The service-linked role is required for services that make use of multiple target groups. For
* more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing
* production traffic to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or
* target group ARN, container name, and container port specified in the service definition are immutable. If
* you are using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when
* updating the service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer
* target group ARN, the container name (as it appears in a container definition), and the container port to
* access from the load balancer. When a task from this service is placed on a container instance, the
* container instance and port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it
* appears in a container definition), and the container port to access from the load balancer. When a task
* from this service is placed on a container instance, the container instance is registered with the load
* balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*/
public void setLoadBalancers(java.util.Collection<LoadBalancer> loadBalancers) {
if (loadBalancers == null) {
this.loadBalancers = null;
return;
}
this.loadBalancers = new com.amazonaws.internal.SdkInternalList<LoadBalancer>(loadBalancers);
}
/**
*
* A load balancer object representing the load balancers to use with your service. For more information, see Service Load
* Balancing in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the
* service. The service-linked role is required for services that make use of multiple target groups. For more
* information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing production traffic
* to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or target
* group ARN, container name, and container port specified in the service definition are immutable. If you are
* using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when updating the
* service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target
* group ARN, the container name (as it appears in a container definition), and the container port to access from
* the load balancer. When a task from this service is placed on a container instance, the container instance and
* port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in
* a container definition), and the container port to access from the load balancer. When a task from this service
* is placed on a container instance, the container instance is registered with the load balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setLoadBalancers(java.util.Collection)} or {@link #withLoadBalancers(java.util.Collection)} if you want
* to override the existing values.
*
*
* @param loadBalancers
* A load balancer object representing the load balancers to use with your service. For more information, see
* Service
* Load Balancing in the Amazon Elastic Container Service Developer Guide.
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to
* the service. The service-linked role is required for services that make use of multiple target groups. For
* more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing
* production traffic to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or
* target group ARN, container name, and container port specified in the service definition are immutable. If
* you are using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when
* updating the service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer
* target group ARN, the container name (as it appears in a container definition), and the container port to
* access from the load balancer. When a task from this service is placed on a container instance, the
* container instance and port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it
* appears in a container definition), and the container port to access from the load balancer. When a task
* from this service is placed on a container instance, the container instance is registered with the load
* balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withLoadBalancers(LoadBalancer... loadBalancers) {
if (this.loadBalancers == null) {
setLoadBalancers(new com.amazonaws.internal.SdkInternalList<LoadBalancer>(loadBalancers.length));
}
for (LoadBalancer ele : loadBalancers) {
this.loadBalancers.add(ele);
}
return this;
}
/**
*
* A load balancer object representing the load balancers to use with your service. For more information, see Service Load
* Balancing in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the
* service. The service-linked role is required for services that make use of multiple target groups. For more
* information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing production traffic
* to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or target
* group ARN, container name, and container port specified in the service definition are immutable. If you are
* using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when updating the
* service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target
* group ARN, the container name (as it appears in a container definition), and the container port to access from
* the load balancer. When a task from this service is placed on a container instance, the container instance and
* port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in
* a container definition), and the container port to access from the load balancer. When a task from this service
* is placed on a container instance, the container instance is registered with the load balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
*
*
* @param loadBalancers
* A load balancer object representing the load balancers to use with your service. For more information, see
* Service
* Load Balancing in the Amazon Elastic Container Service Developer Guide.
*
* If the service is using the rolling update (<code>ECS</code>) deployment controller and using either an
* Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to
* the service. The service-linked role is required for services that make use of multiple target groups. For
* more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
* If the service is using the <code>CODE_DEPLOY</code> deployment controller, the service is required to use
* either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment
* group, you specify two target groups (referred to as a <code>targetGroupPair</code>). During a deployment,
* AWS CodeDeploy determines which task set in your service has the status <code>PRIMARY</code> and associates
* one target group with it, and then associates the other target group with the replacement task set. The load
* balancer can also have up to two listeners: a required listener for production traffic and an optional
* listener that allows you to perform validation tests with Lambda functions before routing
* production traffic to it.
*
*
* After you create a service using the <code>ECS</code> deployment controller, the load balancer name or
* target group ARN, container name, and container port specified in the service definition are immutable. If
* you are using the <code>CODE_DEPLOY</code> deployment controller, these values can be changed when
* updating the service.
*
*
* For Application Load Balancers and Network Load Balancers, this object must contain the load balancer
* target group ARN, the container name (as it appears in a container definition), and the container port to
* access from the load balancer. When a task from this service is placed on a container instance, the
* container instance and port combination is registered as a target in the target group specified here.
*
*
* For Classic Load Balancers, this object must contain the load balancer name, the container name (as it
* appears in a container definition), and the container port to access from the load balancer. When a task
* from this service is placed on a container instance, the container instance is registered with the load
* balancer specified here.
*
*
* Services with tasks that use the <code>awsvpc</code> network mode (for example, those with the Fargate launch
* type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not
* supported. Also, when you create any target groups for these services, you must choose <code>ip</code> as the
* target type, not <code>instance</code>, because tasks that use the <code>awsvpc</code> network mode are
* associated with an elastic network interface, not an Amazon EC2 instance.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withLoadBalancers(java.util.Collection<LoadBalancer> loadBalancers) {
setLoadBalancers(loadBalancers);
return this;
}
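/*
* Illustrative sketch: for an Application Load Balancer or Network Load Balancer, each LoadBalancer entry
* carries the target group ARN, container name, and container port described above, set here through the
* model's standard with* setters. The ARN, container name, and port are hypothetical placeholders.
*
*     request.withLoadBalancers(new LoadBalancer()
*             .withTargetGroupArn("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg/0123456789abcdef")
*             .withContainerName("web")
*             .withContainerPort(80));
*/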
/**
*
* The details of the service discovery registries to assign to this service. For more information, see Service Discovery.
*
*
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more
* information, see AWS
* Fargate Platform Versions.
*
*
*
* @return The details of the service discovery registries to assign to this service. For more information, see Service
* Discovery.
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For
* more information, see AWS Fargate
* Platform Versions.
*
*/
public java.util.List<ServiceRegistry> getServiceRegistries() {
if (serviceRegistries == null) {
serviceRegistries = new com.amazonaws.internal.SdkInternalList<ServiceRegistry>();
}
return serviceRegistries;
}
/**
*
* The details of the service discovery registries to assign to this service. For more information, see Service Discovery.
*
*
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more
* information, see AWS
* Fargate Platform Versions.
*
*
*
* @param serviceRegistries
* The details of the service discovery registries to assign to this service. For more information, see Service
* Discovery.
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For
* more information, see AWS Fargate
* Platform Versions.
*
*/
public void setServiceRegistries(java.util.Collection<ServiceRegistry> serviceRegistries) {
if (serviceRegistries == null) {
this.serviceRegistries = null;
return;
}
this.serviceRegistries = new com.amazonaws.internal.SdkInternalList<ServiceRegistry>(serviceRegistries);
}
/**
*
* The details of the service discovery registries to assign to this service. For more information, see Service Discovery.
*
*
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more
* information, see AWS
* Fargate Platform Versions.
*
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setServiceRegistries(java.util.Collection)} or {@link #withServiceRegistries(java.util.Collection)} if
* you want to override the existing values.
*
*
* @param serviceRegistries
* The details of the service discovery registries to assign to this service. For more information, see Service
* Discovery.
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For
* more information, see AWS Fargate
* Platform Versions.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withServiceRegistries(ServiceRegistry... serviceRegistries) {
if (this.serviceRegistries == null) {
setServiceRegistries(new com.amazonaws.internal.SdkInternalList<ServiceRegistry>(serviceRegistries.length));
}
for (ServiceRegistry ele : serviceRegistries) {
this.serviceRegistries.add(ele);
}
return this;
}
/**
*
* The details of the service discovery registries to assign to this service. For more information, see Service Discovery.
*
*
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more
* information, see AWS
* Fargate Platform Versions.
*
*
*
* @param serviceRegistries
* The details of the service discovery registries to assign to this service. For more information, see Service
* Discovery.
*
* Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For
* more information, see AWS Fargate
* Platform Versions.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withServiceRegistries(java.util.Collection<ServiceRegistry> serviceRegistries) {
setServiceRegistries(serviceRegistries);
return this;
}
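/*
* Illustrative sketch: a service discovery registry is typically referenced by the ARN of an AWS Cloud Map
* service. The ARN below is a hypothetical placeholder.
*
*     request.withServiceRegistries(new ServiceRegistry()
*             .withRegistryArn("arn:aws:servicediscovery:us-east-1:123456789012:service/srv-0123456789abcdef"));
*/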
/**
*
* The number of instantiations of the specified task definition to place and keep running on your cluster.
*
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
*
*
* @param desiredCount
* The number of instantiations of the specified task definition to place and keep running on your
* cluster.
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
*/
public void setDesiredCount(Integer desiredCount) {
this.desiredCount = desiredCount;
}
/**
*
* The number of instantiations of the specified task definition to place and keep running on your cluster.
*
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
*
*
* @return The number of instantiations of the specified task definition to place and keep running on your
* cluster.
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
*/
public Integer getDesiredCount() {
return this.desiredCount;
}
/**
*
* The number of instantiations of the specified task definition to place and keep running on your cluster.
*
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
*
*
* @param desiredCount
* The number of instantiations of the specified task definition to place and keep running on your
* cluster.
*
* This is required if <code>schedulingStrategy</code> is <code>REPLICA</code> or is not specified. If
* <code>schedulingStrategy</code> is <code>DAEMON</code> then this is not required.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withDesiredCount(Integer desiredCount) {
setDesiredCount(desiredCount);
return this;
}
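/*
* Illustrative sketch: desiredCount applies to the REPLICA scheduling strategy, while a DAEMON service omits
* it, as noted above. This assumes the withSchedulingStrategy overload that the SDK generates further down in
* this class; the count shown is an arbitrary example value.
*
*     request.withSchedulingStrategy(SchedulingStrategy.REPLICA)
*            .withDesiredCount(2);
*/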
/**
*
* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII
* characters are allowed.
*
*
* @param clientToken
* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32
* ASCII characters are allowed.
*/
public void setClientToken(String clientToken) {
this.clientToken = clientToken;
}
/**
*
* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII
* characters are allowed.
*
*
* @return Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32
* ASCII characters are allowed.
*/
public String getClientToken() {
return this.clientToken;
}
/**
*
* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII
* characters are allowed.
*
*
* @param clientToken
* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32
* ASCII characters are allowed.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withClientToken(String clientToken) {
setClientToken(clientToken);
return this;
}
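/*
* Illustrative sketch: the client token is any caller-chosen string of up to 32 ASCII characters; reusing the
* same token makes a retried request idempotent. The token below is a hypothetical placeholder.
*
*     request.withClientToken("create-my-service-2020-06-01");
*/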
/**
*
* The launch type on which to run your service. For more information, see Amazon ECS Launch Types
* in the Amazon Elastic Container Service Developer Guide.
*
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be omitted.
*
*
* @param launchType
* The launch type on which to run your service. For more information, see Amazon ECS Launch
* Types in the Amazon Elastic Container Service Developer Guide.
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be
* omitted.
* @see LaunchType
*/
public void setLaunchType(String launchType) {
this.launchType = launchType;
}
/**
*
* The launch type on which to run your service. For more information, see Amazon ECS Launch Types
* in the Amazon Elastic Container Service Developer Guide.
*
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be omitted.
*
*
* @return The launch type on which to run your service. For more information, see Amazon ECS Launch
* Types in the Amazon Elastic Container Service Developer Guide.
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be
* omitted.
* @see LaunchType
*/
public String getLaunchType() {
return this.launchType;
}
/**
*
* The launch type on which to run your service. For more information, see Amazon ECS Launch Types
* in the Amazon Elastic Container Service Developer Guide.
*
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be omitted.
*
*
* @param launchType
* The launch type on which to run your service. For more information, see Amazon ECS Launch
* Types in the Amazon Elastic Container Service Developer Guide.
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be
* omitted.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LaunchType
*/
public CreateServiceRequest withLaunchType(String launchType) {
setLaunchType(launchType);
return this;
}
/**
*
* The launch type on which to run your service. For more information, see Amazon ECS Launch Types
* in the Amazon Elastic Container Service Developer Guide.
*
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be omitted.
*
*
* @param launchType
* The launch type on which to run your service. For more information, see Amazon ECS Launch
* Types in the Amazon Elastic Container Service Developer Guide.
*
* If a <code>launchType</code> is specified, the <code>capacityProviderStrategy</code> parameter must be
* omitted.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LaunchType
*/
public CreateServiceRequest withLaunchType(LaunchType launchType) {
this.launchType = launchType.toString();
return this;
}
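/*
* Illustrative sketch: the LaunchType enum overload above avoids passing a raw string. Remember that
* launchType and capacityProviderStrategy are mutually exclusive, as described in the Javadoc. This assumes
* the withPlatformVersion setter generated further down in this class; the platform version shown is a
* hypothetical placeholder.
*
*     request.withLaunchType(LaunchType.FARGATE)
*            .withPlatformVersion("1.4.0");
*/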
/**
*
* The capacity provider strategy to use for the service.
*
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code> and
* <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be used in
* a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider
* with a cluster. Only capacity providers with an <code>ACTIVE</code> or <code>UPDATING</code> status can be
* used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be
* omitted. If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created.
* New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or <code>FARGATE_SPOT</code>
* capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be
* associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity providers
* for a cluster after the cluster is created.
*
*
* @return The capacity provider strategy to use for the service.
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code> and
* <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be used in
* a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider
* with a cluster. Only capacity providers with an <code>ACTIVE</code> or <code>UPDATING</code> status can be
* used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be
* omitted. If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be
* created. New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or <code>FARGATE_SPOT</code>
* capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be
* associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity
* providers for a cluster after the cluster is created.
*/
public java.util.List<CapacityProviderStrategyItem> getCapacityProviderStrategy() {
if (capacityProviderStrategy == null) {
capacityProviderStrategy = new com.amazonaws.internal.SdkInternalList<CapacityProviderStrategyItem>();
}
return capacityProviderStrategy;
}
/**
*
* The capacity provider strategy to use for the service.
*
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code> and
* <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be used in a
* capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider
* with a cluster. Only capacity providers with an <code>ACTIVE</code> or <code>UPDATING</code> status can be used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be omitted.
* If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created.
* New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or <code>FARGATE_SPOT</code>
* capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be
* associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity providers
* for a cluster after the cluster is created.
*
*
* @param capacityProviderStrategy
* The capacity provider strategy to use for the service.
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code>
* and <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be
* used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a
* capacity provider with a cluster. Only capacity providers with an <code>ACTIVE</code> or
* <code>UPDATING</code> status can be used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be
* omitted. If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be
* created. New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or
* <code>FARGATE_SPOT</code> capacity providers. The AWS Fargate capacity providers are available to all
* accounts and only need to be associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity
* providers for a cluster after the cluster is created.
*/
public void setCapacityProviderStrategy(java.util.Collection capacityProviderStrategy) {
if (capacityProviderStrategy == null) {
this.capacityProviderStrategy = null;
return;
}
this.capacityProviderStrategy = new com.amazonaws.internal.SdkInternalList(capacityProviderStrategy);
}
/**
*
* The capacity provider strategy to use for the service.
*
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code> and
* <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be used in a
* capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider
* with a cluster. Only capacity providers with an <code>ACTIVE</code> or <code>UPDATING</code> status can be used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be omitted.
* If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created.
* New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or <code>FARGATE_SPOT</code>
* capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be
* associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity providers
* for a cluster after the cluster is created.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setCapacityProviderStrategy(java.util.Collection)} or
* {@link #withCapacityProviderStrategy(java.util.Collection)} if you want to override the existing values.
*
*
* @param capacityProviderStrategy
* The capacity provider strategy to use for the service.
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code>
* and <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be
* used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a
* capacity provider with a cluster. Only capacity providers with an <code>ACTIVE</code> or
* <code>UPDATING</code> status can be used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be
* omitted. If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be
* created. New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or
* <code>FARGATE_SPOT</code> capacity providers. The AWS Fargate capacity providers are available to all
* accounts and only need to be associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity
* providers for a cluster after the cluster is created.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withCapacityProviderStrategy(CapacityProviderStrategyItem... capacityProviderStrategy) {
if (this.capacityProviderStrategy == null) {
setCapacityProviderStrategy(new com.amazonaws.internal.SdkInternalList(capacityProviderStrategy.length));
}
for (CapacityProviderStrategyItem ele : capacityProviderStrategy) {
this.capacityProviderStrategy.add(ele);
}
return this;
}
/**
*
* The capacity provider strategy to use for the service.
*
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code> and
* <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be used in a
* capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider
* with a cluster. Only capacity providers with an <code>ACTIVE</code> or <code>UPDATING</code> status can be used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be omitted.
* If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created.
* New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or <code>FARGATE_SPOT</code>
* capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be
* associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity providers
* for a cluster after the cluster is created.
*
*
* @param capacityProviderStrategy
* The capacity provider strategy to use for the service.
*
* A capacity provider strategy consists of one or more capacity providers along with the <code>base</code>
* and <code>weight</code> to assign to them. A capacity provider must be associated with the cluster to be
* used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a
* capacity provider with a cluster. Only capacity providers with an <code>ACTIVE</code> or
* <code>UPDATING</code> status can be used.
*
*
* If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code> parameter must be
* omitted. If no <code>capacityProviderStrategy</code> or <code>launchType</code> is specified, the
* <code>defaultCapacityProviderStrategy</code> for the cluster is used.
*
*
* If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be
* created. New capacity providers can be created with the CreateCapacityProvider API operation.
*
*
* To use an AWS Fargate capacity provider, specify either the <code>FARGATE</code> or
* <code>FARGATE_SPOT</code> capacity providers. The AWS Fargate capacity providers are available to all
* accounts and only need to be associated with a cluster to be used.
*
*
* The PutClusterCapacityProviders API operation is used to update the list of available capacity
* providers for a cluster after the cluster is created.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withCapacityProviderStrategy(java.util.Collection capacityProviderStrategy) {
setCapacityProviderStrategy(capacityProviderStrategy);
return this;
}
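/*
 * Illustrative usage sketch (assumed provider names and weights, not prescribed values): mixing the FARGATE and
 * FARGATE_SPOT capacity providers in a strategy instead of setting a launch type.
 *
 *     CreateServiceRequest request = new CreateServiceRequest()
 *             .withServiceName("web")
 *             .withTaskDefinition("web-task:3")
 *             .withCapacityProviderStrategy(
 *                     new CapacityProviderStrategyItem().withCapacityProvider("FARGATE").withBase(1).withWeight(1),
 *                     new CapacityProviderStrategyItem().withCapacityProvider("FARGATE_SPOT").withWeight(4));
 */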
/**
*
* The platform version that your tasks in the service are running on. A platform version is specified only for
* tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version is used by
* default. For more information, see AWS Fargate Platform
* Versions in the Amazon Elastic Container Service Developer Guide.
*
*
* @param platformVersion
* The platform version that your tasks in the service are running on. A platform version is specified only
* for tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version
* is used by default. For more information, see AWS Fargate
* Platform Versions in the Amazon Elastic Container Service Developer Guide.
*/
public void setPlatformVersion(String platformVersion) {
this.platformVersion = platformVersion;
}
/**
*
* The platform version that your tasks in the service are running on. A platform version is specified only for
* tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version is used by
* default. For more information, see AWS Fargate Platform
* Versions in the Amazon Elastic Container Service Developer Guide.
*
*
* @return The platform version that your tasks in the service are running on. A platform version is specified only
* for tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version
* is used by default. For more information, see AWS Fargate
* Platform Versions in the Amazon Elastic Container Service Developer Guide.
*/
public String getPlatformVersion() {
return this.platformVersion;
}
/**
*
* The platform version that your tasks in the service are running on. A platform version is specified only for
* tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version is used by
* default. For more information, see AWS Fargate Platform
* Versions in the Amazon Elastic Container Service Developer Guide.
*
*
* @param platformVersion
* The platform version that your tasks in the service are running on. A platform version is specified only
* for tasks using the Fargate launch type. If one isn't specified, the <code>LATEST</code> platform version
* is used by default. For more information, see AWS Fargate
* Platform Versions in the Amazon Elastic Container Service Developer Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withPlatformVersion(String platformVersion) {
setPlatformVersion(platformVersion);
return this;
}
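/*
 * Illustrative usage sketch: pinning a Fargate platform version instead of relying on LATEST. The version string
 * "1.4.0" is an assumed example value.
 *
 *     CreateServiceRequest request = new CreateServiceRequest()
 *             .withLaunchType(LaunchType.FARGATE)
 *             .withPlatformVersion("1.4.0");
 */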
/**
*
* The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load
* balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and
* your task definition does not use the <code>awsvpc</code> network mode. If you specify the <code>role</code>
* parameter, you must also specify a load balancer object with the <code>loadBalancers</code> parameter.
*
*
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for your
* service unless you specify a role here. The service-linked role is required if your task definition uses the
* <code>awsvpc</code> network mode or if the service is configured to use service discovery, an external deployment
* controller, multiple target groups, or Elastic Inference accelerators, in which case you should not specify a role
* here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role ARN (this
* is recommended) or prefix the role name with the path. For example, if a role with the name <code>bar</code> has
* a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the role name. For more information,
* see Friendly Names and Paths in the IAM User Guide.
*
*
* @param role
* The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your
* load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your
* service and your task definition does not use the <code>awsvpc</code> network mode. If you specify the
* <code>role</code> parameter, you must also specify a load balancer object with the
* <code>loadBalancers</code> parameter.
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for
* your service unless you specify a role here. The service-linked role is required if your task definition
* uses the <code>awsvpc</code> network mode or if the service is configured to use service discovery, an
* external deployment controller, multiple target groups, or Elastic Inference accelerators, in which case
* you should not specify a role here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role
* ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name
* <code>bar</code> has a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the role
* name. For more information, see Friendly Names and Paths in the IAM User Guide.
*/
public void setRole(String role) {
this.role = role;
}
/**
*
* The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load
* balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and
* your task definition does not use the <code>awsvpc</code> network mode. If you specify the <code>role</code>
* parameter, you must also specify a load balancer object with the <code>loadBalancers</code> parameter.
*
*
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for your
* service unless you specify a role here. The service-linked role is required if your task definition uses the
* <code>awsvpc</code> network mode or if the service is configured to use service discovery, an external deployment
* controller, multiple target groups, or Elastic Inference accelerators, in which case you should not specify a role
* here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role ARN (this
* is recommended) or prefix the role name with the path. For example, if a role with the name <code>bar</code> has
* a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the role name. For more information,
* see Friendly Names and Paths in the IAM User Guide.
*
*
* @return The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your
* load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your
* service and your task definition does not use the <code>awsvpc</code> network mode. If you specify the
* <code>role</code> parameter, you must also specify a load balancer object with the
* <code>loadBalancers</code> parameter.
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for
* your service unless you specify a role here. The service-linked role is required if your task definition
* uses the <code>awsvpc</code> network mode or if the service is configured to use service discovery, an
* external deployment controller, multiple target groups, or Elastic Inference accelerators, in which case
* you should not specify a role here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role
* ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name
* <code>bar</code> has a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the
* role name. For more information, see Friendly Names and Paths in the IAM User Guide.
*/
public String getRole() {
return this.role;
}
/**
*
* The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load
* balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and
* your task definition does not use the <code>awsvpc</code> network mode. If you specify the <code>role</code>
* parameter, you must also specify a load balancer object with the <code>loadBalancers</code> parameter.
*
*
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for your
* service unless you specify a role here. The service-linked role is required if your task definition uses the
* <code>awsvpc</code> network mode or if the service is configured to use service discovery, an external deployment
* controller, multiple target groups, or Elastic Inference accelerators, in which case you should not specify a role
* here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role ARN (this
* is recommended) or prefix the role name with the path. For example, if a role with the name <code>bar</code> has
* a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the role name. For more information,
* see Friendly Names and Paths in the IAM User Guide.
*
*
* @param role
* The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your
* load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your
* service and your task definition does not use the <code>awsvpc</code> network mode. If you specify the
* <code>role</code> parameter, you must also specify a load balancer object with the
* <code>loadBalancers</code> parameter.
*
* If your account has already created the Amazon ECS service-linked role, that role is used by default for
* your service unless you specify a role here. The service-linked role is required if your task definition
* uses the <code>awsvpc</code> network mode or if the service is configured to use service discovery, an
* external deployment controller, multiple target groups, or Elastic Inference accelerators, in which case
* you should not specify a role here. For more information, see Using
* Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*
*
*
* If your specified role has a path other than <code>/</code>, then you must either specify the full role
* ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name
* <code>bar</code> has a path of <code>/foo/</code> then you would specify <code>/foo/bar</code> as the role
* name. For more information, see Friendly Names and Paths in the IAM User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withRole(String role) {
setRole(role);
return this;
}
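/*
 * Illustrative sketch: passing a role that has a path. Per the documentation above, either the full ARN
 * (recommended) or the path-prefixed name can be supplied; the account ID, path, and role name below are
 * assumed values, and "request" stands for a previously built CreateServiceRequest.
 *
 *     request.withRole("arn:aws:iam::123456789012:role/foo/bar"); // full ARN (recommended)
 *     // or, for a role named "bar" with path "/foo/":
 *     request.withRole("/foo/bar");
 */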
/**
*
* Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping
* and starting tasks.
*
*
* @param deploymentConfiguration
* Optional deployment parameters that control how many tasks run during the deployment and the ordering of
* stopping and starting tasks.
*/
public void setDeploymentConfiguration(DeploymentConfiguration deploymentConfiguration) {
this.deploymentConfiguration = deploymentConfiguration;
}
/**
*
* Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping
* and starting tasks.
*
*
* @return Optional deployment parameters that control how many tasks run during the deployment and the ordering of
* stopping and starting tasks.
*/
public DeploymentConfiguration getDeploymentConfiguration() {
return this.deploymentConfiguration;
}
/**
*
* Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping
* and starting tasks.
*
*
* @param deploymentConfiguration
* Optional deployment parameters that control how many tasks run during the deployment and the ordering of
* stopping and starting tasks.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withDeploymentConfiguration(DeploymentConfiguration deploymentConfiguration) {
setDeploymentConfiguration(deploymentConfiguration);
return this;
}
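/*
 * Illustrative sketch: a rolling-update configuration that keeps at least half of the desired tasks running and
 * allows up to double during a deployment. The percentages are assumed example values.
 *
 *     request.withDeploymentConfiguration(new DeploymentConfiguration()
 *             .withMinimumHealthyPercent(50)
 *             .withMaximumPercent(200));
 */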
/**
*
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at runtime).
*
*
* @return An array of placement constraint objects to use for tasks in your service. You can specify a maximum of
* 10 constraints per task (this limit includes constraints in the task definition and those specified at
* runtime).
*/
public java.util.List getPlacementConstraints() {
if (placementConstraints == null) {
placementConstraints = new com.amazonaws.internal.SdkInternalList();
}
return placementConstraints;
}
/**
*
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at runtime).
*
*
* @param placementConstraints
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at
* runtime).
*/
public void setPlacementConstraints(java.util.Collection placementConstraints) {
if (placementConstraints == null) {
this.placementConstraints = null;
return;
}
this.placementConstraints = new com.amazonaws.internal.SdkInternalList(placementConstraints);
}
/**
*
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at runtime).
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setPlacementConstraints(java.util.Collection)} or {@link #withPlacementConstraints(java.util.Collection)}
* if you want to override the existing values.
*
*
* @param placementConstraints
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at
* runtime).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withPlacementConstraints(PlacementConstraint... placementConstraints) {
if (this.placementConstraints == null) {
setPlacementConstraints(new com.amazonaws.internal.SdkInternalList(placementConstraints.length));
}
for (PlacementConstraint ele : placementConstraints) {
this.placementConstraints.add(ele);
}
return this;
}
/**
*
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at runtime).
*
*
* @param placementConstraints
* An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10
* constraints per task (this limit includes constraints in the task definition and those specified at
* runtime).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withPlacementConstraints(java.util.Collection placementConstraints) {
setPlacementConstraints(placementConstraints);
return this;
}
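/*
 * Illustrative sketch: one distinctInstance constraint plus a memberOf constraint. The cluster query expression
 * (attribute name and pattern) is a hypothetical example, not a required value.
 *
 *     request.withPlacementConstraints(
 *             new PlacementConstraint().withType("distinctInstance"),
 *             new PlacementConstraint().withType("memberOf")
 *                     .withExpression("attribute:ecs.instance-type =~ t3.*"));
 */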
/**
*
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five strategy rules
* per service.
*
*
* @return The placement strategy objects to use for tasks in your service. You can specify a maximum of five
* strategy rules per service.
*/
public java.util.List getPlacementStrategy() {
if (placementStrategy == null) {
placementStrategy = new com.amazonaws.internal.SdkInternalList();
}
return placementStrategy;
}
/**
*
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five strategy rules
* per service.
*
*
* @param placementStrategy
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five
* strategy rules per service.
*/
public void setPlacementStrategy(java.util.Collection placementStrategy) {
if (placementStrategy == null) {
this.placementStrategy = null;
return;
}
this.placementStrategy = new com.amazonaws.internal.SdkInternalList(placementStrategy);
}
/**
*
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five strategy rules
* per service.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setPlacementStrategy(java.util.Collection)} or {@link #withPlacementStrategy(java.util.Collection)} if
* you want to override the existing values.
*
*
* @param placementStrategy
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five
* strategy rules per service.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withPlacementStrategy(PlacementStrategy... placementStrategy) {
if (this.placementStrategy == null) {
setPlacementStrategy(new com.amazonaws.internal.SdkInternalList(placementStrategy.length));
}
for (PlacementStrategy ele : placementStrategy) {
this.placementStrategy.add(ele);
}
return this;
}
/**
*
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five strategy rules
* per service.
*
*
* @param placementStrategy
* The placement strategy objects to use for tasks in your service. You can specify a maximum of five
* strategy rules per service.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withPlacementStrategy(java.util.Collection placementStrategy) {
setPlacementStrategy(placementStrategy);
return this;
}
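/*
 * Illustrative sketch: spread tasks across Availability Zones, then binpack on memory. The particular
 * combination of strategies is an assumed example.
 *
 *     request.withPlacementStrategy(
 *             new PlacementStrategy().withType("spread").withField("attribute:ecs.availability-zone"),
 *             new PlacementStrategy().withType("binpack").withField("memory"));
 */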
/**
*
* The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported for
* other network modes. For more information, see Task Networking in
* the Amazon Elastic Container Service Developer Guide.
*
*
* @param networkConfiguration
* The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported
* for other network modes. For more information, see Task
* Networking in the Amazon Elastic Container Service Developer Guide.
*/
public void setNetworkConfiguration(NetworkConfiguration networkConfiguration) {
this.networkConfiguration = networkConfiguration;
}
/**
*
* The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported for
* other network modes. For more information, see Task Networking in
* the Amazon Elastic Container Service Developer Guide.
*
*
* @return The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported
* for other network modes. For more information, see Task
* Networking in the Amazon Elastic Container Service Developer Guide.
*/
public NetworkConfiguration getNetworkConfiguration() {
return this.networkConfiguration;
}
/**
*
* The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported for
* other network modes. For more information, see Task Networking in
* the Amazon Elastic Container Service Developer Guide.
*
*
* @param networkConfiguration
* The network configuration for the service. This parameter is required for task definitions that use the
* <code>awsvpc</code> network mode to receive their own elastic network interface, and it is not supported
* for other network modes. For more information, see Task
* Networking in the Amazon Elastic Container Service Developer Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withNetworkConfiguration(NetworkConfiguration networkConfiguration) {
setNetworkConfiguration(networkConfiguration);
return this;
}
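/*
 * Illustrative sketch for a task definition that uses the awsvpc network mode. The subnet and security group IDs
 * below are placeholders, not real resources.
 *
 *     request.withNetworkConfiguration(new NetworkConfiguration()
 *             .withAwsvpcConfiguration(new AwsVpcConfiguration()
 *                     .withSubnets("subnet-0123456789abcdef0")
 *                     .withSecurityGroups("sg-0123456789abcdef0")
 *                     .withAssignPublicIp(AssignPublicIp.DISABLED)));
 */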
/**
*
* The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load
* Balancing target health checks after a task has first started. This is only used when your service is configured
* to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace
* period value, the default value of <code>0</code> is used.
*
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can
* specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
* scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as
* unhealthy and stopping them before they have time to come up.
*
*
* @param healthCheckGracePeriodSeconds
* The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load
* Balancing target health checks after a task has first started. This is only used when your service is
* configured to use a load balancer. If your service has a load balancer defined and you don't specify a
* health check grace period value, the default value of <code>0</code> is used.
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can
* specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS
* service scheduler ignores health check status. This grace period can prevent the service scheduler from
* marking tasks as unhealthy and stopping them before they have time to come up.
*/
public void setHealthCheckGracePeriodSeconds(Integer healthCheckGracePeriodSeconds) {
this.healthCheckGracePeriodSeconds = healthCheckGracePeriodSeconds;
}
/**
*
* The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load
* Balancing target health checks after a task has first started. This is only used when your service is configured
* to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace
* period value, the default value of <code>0</code> is used.
*
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can
* specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
* scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as
* unhealthy and stopping them before they have time to come up.
*
*
* @return The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic
* Load Balancing target health checks after a task has first started. This is only used when your service
* is configured to use a load balancer. If your service has a load balancer defined and you don't specify a
* health check grace period value, the default value of <code>0</code> is used.
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you
* can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS
* service scheduler ignores health check status. This grace period can prevent the service scheduler from
* marking tasks as unhealthy and stopping them before they have time to come up.
*/
public Integer getHealthCheckGracePeriodSeconds() {
return this.healthCheckGracePeriodSeconds;
}
/**
*
* The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load
* Balancing target health checks after a task has first started. This is only used when your service is configured
* to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace
* period value, the default value of <code>0</code> is used.
*
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can
* specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
* scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as
* unhealthy and stopping them before they have time to come up.
*
*
* @param healthCheckGracePeriodSeconds
* The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load
* Balancing target health checks after a task has first started. This is only used when your service is
* configured to use a load balancer. If your service has a load balancer defined and you don't specify a
* health check grace period value, the default value of <code>0</code> is used.
*
* If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can
* specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS
* service scheduler ignores health check status. This grace period can prevent the service scheduler from
* marking tasks as unhealthy and stopping them before they have time to come up.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withHealthCheckGracePeriodSeconds(Integer healthCheckGracePeriodSeconds) {
setHealthCheckGracePeriodSeconds(healthCheckGracePeriodSeconds);
return this;
}
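/*
 * Illustrative sketch: give slow-starting tasks two minutes before load balancer health checks can mark them
 * unhealthy. The 120-second value is an assumed example; the default is 0.
 *
 *     request.withHealthCheckGracePeriodSeconds(120);
 */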
/**
*
* The scheduling strategy to use for the service. For more information, see Services.
*
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks across your
* cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement
* strategies and constraints to customize task placement decisions. This scheduler strategy is required if the
* service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container instance
* that meets all of the task placement constraints that you specify in your cluster. The service scheduler also
* evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement
* constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task
* placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
*
*
* @param schedulingStrategy
* The scheduling strategy to use for the service. For more information, see Services.
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks
* across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can
* use task placement strategies and constraints to customize task placement decisions. This scheduler
* strategy is required if the service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code>
* deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container
* instance that meets all of the task placement constraints that you specify in your cluster. The service
* scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not
* meet the placement constraints. When you're using this strategy, you don't need to specify a desired
* number of tasks, a task placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
* @see SchedulingStrategy
*/
public void setSchedulingStrategy(String schedulingStrategy) {
this.schedulingStrategy = schedulingStrategy;
}
/**
*
* The scheduling strategy to use for the service. For more information, see Services.
*
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks across your
* cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement
* strategies and constraints to customize task placement decisions. This scheduler strategy is required if the
* service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container instance
* that meets all of the task placement constraints that you specify in your cluster. The service scheduler also
* evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement
* constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task
* placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
*
*
* @return The scheduling strategy to use for the service. For more information, see Services.
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks
* across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can
* use task placement strategies and constraints to customize task placement decisions. This scheduler
* strategy is required if the service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code>
* deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container
* instance that meets all of the task placement constraints that you specify in your cluster. The service
* scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not
* meet the placement constraints. When you're using this strategy, you don't need to specify a desired
* number of tasks, a task placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
* @see SchedulingStrategy
*/
public String getSchedulingStrategy() {
return this.schedulingStrategy;
}
/**
*
* The scheduling strategy to use for the service. For more information, see Services.
*
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks across your
* cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement
* strategies and constraints to customize task placement decisions. This scheduler strategy is required if the
* service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container instance
* that meets all of the task placement constraints that you specify in your cluster. The service scheduler also
* evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement
* constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task
* placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
*
*
* @param schedulingStrategy
* The scheduling strategy to use for the service. For more information, see Services.
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks
* across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can
* use task placement strategies and constraints to customize task placement decisions. This scheduler
* strategy is required if the service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code>
* deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container
* instance that meets all of the task placement constraints that you specify in your cluster. The service
* scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not
* meet the placement constraints. When you're using this strategy, you don't need to specify a desired
* number of tasks, a task placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see SchedulingStrategy
*/
public CreateServiceRequest withSchedulingStrategy(String schedulingStrategy) {
setSchedulingStrategy(schedulingStrategy);
return this;
}
/**
*
* The scheduling strategy to use for the service. For more information, see Services.
*
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks across your
* cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement
* strategies and constraints to customize task placement decisions. This scheduler strategy is required if the
* service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container instance
* that meets all of the task placement constraints that you specify in your cluster. The service scheduler also
* evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement
* constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task
* placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
*
*
* @param schedulingStrategy
* The scheduling strategy to use for the service. For more information, see Services.
*
* There are two service scheduler strategies available:
*
*
* -
*
* <code>REPLICA</code>-The replica scheduling strategy places and maintains the desired number of tasks
* across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can
* use task placement strategies and constraints to customize task placement decisions. This scheduler
* strategy is required if the service is using the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code>
* deployment controller types.
*
*
* -
*
* <code>DAEMON</code>-The daemon scheduling strategy deploys exactly one task on each active container
* instance that meets all of the task placement constraints that you specify in your cluster. The service
* scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not
* meet the placement constraints. When you're using this strategy, you don't need to specify a desired
* number of tasks, a task placement strategy, or use Service Auto Scaling policies.
*
*
*
* Tasks using the Fargate launch type or the <code>CODE_DEPLOY</code> or <code>EXTERNAL</code> deployment
* controller types don't support the <code>DAEMON</code> scheduling strategy.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see SchedulingStrategy
*/
public CreateServiceRequest withSchedulingStrategy(SchedulingStrategy schedulingStrategy) {
this.schedulingStrategy = schedulingStrategy.toString();
return this;
}
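/*
 * Illustrative sketch: running the service as a daemon (one task per container instance). Per the documentation
 * above, DAEMON is not supported with the Fargate launch type or the CODE_DEPLOY/EXTERNAL deployment controllers.
 *
 *     request.withSchedulingStrategy(SchedulingStrategy.DAEMON);
 *     // the default, replica behavior would instead be:
 *     // request.withSchedulingStrategy(SchedulingStrategy.REPLICA).withDesiredCount(3);
 */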
/**
*
* The deployment controller to use for the service.
*
*
* @param deploymentController
* The deployment controller to use for the service.
*/
public void setDeploymentController(DeploymentController deploymentController) {
this.deploymentController = deploymentController;
}
/**
*
* The deployment controller to use for the service.
*
*
* @return The deployment controller to use for the service.
*/
public DeploymentController getDeploymentController() {
return this.deploymentController;
}
/**
*
* The deployment controller to use for the service.
*
*
* @param deploymentController
* The deployment controller to use for the service.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withDeploymentController(DeploymentController deploymentController) {
setDeploymentController(deploymentController);
return this;
}
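/*
 * Illustrative sketch: opting into blue/green deployments driven by AWS CodeDeploy instead of the default
 * rolling-update (ECS) controller.
 *
 *     request.withDeploymentController(new DeploymentController()
 *             .withType(DeploymentControllerType.CODE_DEPLOY));
 */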
/**
*
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key
* and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.
*
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for
* either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this
* prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*
*
* @return The metadata that you apply to the service to help you categorize and organize them. Each tag consists of
* a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as
* well.
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a
* prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or
* values with this prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*/
public java.util.List getTags() {
if (tags == null) {
tags = new com.amazonaws.internal.SdkInternalList();
}
return tags;
}
/**
*
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key
* and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.
*
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for
* either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this
* prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*
*
* @param tags
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of
* a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as
* well.
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix
* for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with
* this prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*/
public void setTags(java.util.Collection tags) {
if (tags == null) {
this.tags = null;
return;
}
this.tags = new com.amazonaws.internal.SdkInternalList(tags);
}
/**
*
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key
* and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.
*
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for
* either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this
* prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setTags(java.util.Collection)} or {@link #withTags(java.util.Collection)} if you want to override the
* existing values.
*
*
* @param tags
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of
* a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as
* well.
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix
* for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with
* this prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withTags(Tag... tags) {
if (this.tags == null) {
setTags(new com.amazonaws.internal.SdkInternalList(tags.length));
}
for (Tag ele : tags) {
this.tags.add(ele);
}
return this;
}
/**
*
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key
* and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.
*
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
* Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for
* either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this
* prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
*
*
* @param tags
* The metadata that you apply to the service to help you categorize and organize them. Each tag consists of
* a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as
* well.
*
* The following basic restrictions apply to tags:
*
*
* -
*
* Maximum number of tags per resource - 50
*
*
* -
*
* For each resource, each tag key must be unique, and each tag key can have only one value.
*
*
* -
*
* Maximum key length - 128 Unicode characters in UTF-8
*
*
* -
*
* Maximum value length - 256 Unicode characters in UTF-8
*
*
* -
*
* If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
*
* -
*
* Tag keys and values are case-sensitive.
*
*
* -
*
     * Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix
     * for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with
     * this prefix. Tags with this prefix do not count against your tags per resource limit.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public CreateServiceRequest withTags(java.util.Collection<Tag> tags) {
setTags(tags);
return this;
}
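    /*
     * Usage sketch (illustrative only, not part of the generated model): the cluster, service, and
     * tag values below are assumed example values. The varargs overload above appends to any tag
     * list already set on the request, while the collection overload delegates to setTags and
     * therefore replaces it.
     *
     *   CreateServiceRequest request = new CreateServiceRequest()
     *       .withCluster("my-cluster")
     *       .withServiceName("my-service")
     *       .withTags(new Tag().withKey("team").withValue("platform"),
     *               new Tag().withKey("env").withValue("prod"));
     *
     *   // Equivalent, using the collection overload:
     *   request.withTags(java.util.Arrays.asList(
     *           new Tag().withKey("team").withValue("platform"),
     *           new Tag().withKey("env").withValue("prod")));
     */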
/**
*
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see
* Tagging Your Amazon ECS
* Resources in the Amazon Elastic Container Service Developer Guide.
*
*
* @param enableECSManagedTags
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more
* information, see Tagging Your Amazon
* ECS Resources in the Amazon Elastic Container Service Developer Guide.
*/
public void setEnableECSManagedTags(Boolean enableECSManagedTags) {
this.enableECSManagedTags = enableECSManagedTags;
}
/**
*
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see
* Tagging Your Amazon ECS
* Resources in the Amazon Elastic Container Service Developer Guide.
*
*
* @return Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more
* information, see Tagging Your
* Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.
*/
public Boolean getEnableECSManagedTags() {
return this.enableECSManagedTags;
}
/**
*
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see
* Tagging Your Amazon ECS
* Resources in the Amazon Elastic Container Service Developer Guide.
*
*
* @param enableECSManagedTags
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more
* information, see Tagging Your Amazon
* ECS Resources in the Amazon Elastic Container Service Developer Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateServiceRequest withEnableECSManagedTags(Boolean enableECSManagedTags) {
setEnableECSManagedTags(enableECSManagedTags);
return this;
}
/**
*
* Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see
* Tagging Your Amazon ECS
* Resources in the Amazon Elastic Container Service Developer Guide.
*
*
* @return Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more
* information, see Tagging Your
* Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.
*/
public Boolean isEnableECSManagedTags() {
return this.enableECSManagedTags;
}
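    /*
     * Usage sketch (illustrative only): opting in to Amazon ECS managed tags so that ECS
     * automatically tags the tasks started by this service, per the Tagging Your Amazon ECS
     * Resources guide linked above. The service name is an assumed example value.
     *
     *   CreateServiceRequest request = new CreateServiceRequest()
     *       .withServiceName("my-service")
     *       .withEnableECSManagedTags(true);
     */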
/**
*
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If
* no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service
* during service creation. To add tags to a task after service creation, use the TagResource API action.
*
*
* @param propagateTags
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the
* service. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks
* within the service during service creation. To add tags to a task after service creation, use the
* TagResource API action.
* @see PropagateTags
*/
public void setPropagateTags(String propagateTags) {
this.propagateTags = propagateTags;
}
/**
*
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If
* no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service
* during service creation. To add tags to a task after service creation, use the TagResource API action.
*
*
* @return Specifies whether to propagate the tags from the task definition or the service to the tasks in the
* service. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks
* within the service during service creation. To add tags to a task after service creation, use the
* TagResource API action.
* @see PropagateTags
*/
public String getPropagateTags() {
return this.propagateTags;
}
/**
*
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If
* no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service
* during service creation. To add tags to a task after service creation, use the TagResource API action.
*
*
* @param propagateTags
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the
* service. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks
* within the service during service creation. To add tags to a task after service creation, use the
* TagResource API action.
* @return Returns a reference to this object so that method calls can be chained together.
* @see PropagateTags
*/
public CreateServiceRequest withPropagateTags(String propagateTags) {
setPropagateTags(propagateTags);
return this;
}
/**
*
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If
* no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service
* during service creation. To add tags to a task after service creation, use the TagResource API action.
*
*
* @param propagateTags
* Specifies whether to propagate the tags from the task definition or the service to the tasks in the
* service. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks
* within the service during service creation. To add tags to a task after service creation, use the
* TagResource API action.
* @return Returns a reference to this object so that method calls can be chained together.
* @see PropagateTags
*/
public CreateServiceRequest withPropagateTags(PropagateTags propagateTags) {
this.propagateTags = propagateTags.toString();
return this;
}
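    /*
     * Usage sketch (illustrative only): selecting the tag-propagation source with the typed
     * overload. SERVICE and TASK_DEFINITION are values defined by the PropagateTags enum in this
     * package; the service name is an assumed example value.
     *
     *   CreateServiceRequest request = new CreateServiceRequest()
     *       .withServiceName("my-service")
     *       .withPropagateTags(PropagateTags.SERVICE);
     */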
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getCluster() != null)
sb.append("Cluster: ").append(getCluster()).append(",");
if (getServiceName() != null)
sb.append("ServiceName: ").append(getServiceName()).append(",");
if (getTaskDefinition() != null)
sb.append("TaskDefinition: ").append(getTaskDefinition()).append(",");
if (getLoadBalancers() != null)
sb.append("LoadBalancers: ").append(getLoadBalancers()).append(",");
if (getServiceRegistries() != null)
sb.append("ServiceRegistries: ").append(getServiceRegistries()).append(",");
if (getDesiredCount() != null)
sb.append("DesiredCount: ").append(getDesiredCount()).append(",");
if (getClientToken() != null)
sb.append("ClientToken: ").append(getClientToken()).append(",");
if (getLaunchType() != null)
sb.append("LaunchType: ").append(getLaunchType()).append(",");
if (getCapacityProviderStrategy() != null)
sb.append("CapacityProviderStrategy: ").append(getCapacityProviderStrategy()).append(",");
if (getPlatformVersion() != null)
sb.append("PlatformVersion: ").append(getPlatformVersion()).append(",");
if (getRole() != null)
sb.append("Role: ").append(getRole()).append(",");
if (getDeploymentConfiguration() != null)
sb.append("DeploymentConfiguration: ").append(getDeploymentConfiguration()).append(",");
if (getPlacementConstraints() != null)
sb.append("PlacementConstraints: ").append(getPlacementConstraints()).append(",");
if (getPlacementStrategy() != null)
sb.append("PlacementStrategy: ").append(getPlacementStrategy()).append(",");
if (getNetworkConfiguration() != null)
sb.append("NetworkConfiguration: ").append(getNetworkConfiguration()).append(",");
if (getHealthCheckGracePeriodSeconds() != null)
sb.append("HealthCheckGracePeriodSeconds: ").append(getHealthCheckGracePeriodSeconds()).append(",");
if (getSchedulingStrategy() != null)
sb.append("SchedulingStrategy: ").append(getSchedulingStrategy()).append(",");
if (getDeploymentController() != null)
sb.append("DeploymentController: ").append(getDeploymentController()).append(",");
if (getTags() != null)
sb.append("Tags: ").append(getTags()).append(",");
if (getEnableECSManagedTags() != null)
sb.append("EnableECSManagedTags: ").append(getEnableECSManagedTags()).append(",");
if (getPropagateTags() != null)
sb.append("PropagateTags: ").append(getPropagateTags());
sb.append("}");
return sb.toString();
}
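    /*
     * Debugging sketch (illustrative only): because sensitive fields are redacted with a placeholder
     * value, the generated toString() is convenient in log statements. Assuming a request with only
     * the service name and tag-propagation setting populated, the output would resemble:
     *
     *   System.out.println(request);
     *   // {ServiceName: my-service,PropagateTags: SERVICE}
     */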
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CreateServiceRequest == false)
return false;
CreateServiceRequest other = (CreateServiceRequest) obj;
if (other.getCluster() == null ^ this.getCluster() == null)
return false;
if (other.getCluster() != null && other.getCluster().equals(this.getCluster()) == false)
return false;
if (other.getServiceName() == null ^ this.getServiceName() == null)
return false;
if (other.getServiceName() != null && other.getServiceName().equals(this.getServiceName()) == false)
return false;
if (other.getTaskDefinition() == null ^ this.getTaskDefinition() == null)
return false;
if (other.getTaskDefinition() != null && other.getTaskDefinition().equals(this.getTaskDefinition()) == false)
return false;
if (other.getLoadBalancers() == null ^ this.getLoadBalancers() == null)
return false;
if (other.getLoadBalancers() != null && other.getLoadBalancers().equals(this.getLoadBalancers()) == false)
return false;
if (other.getServiceRegistries() == null ^ this.getServiceRegistries() == null)
return false;
if (other.getServiceRegistries() != null && other.getServiceRegistries().equals(this.getServiceRegistries()) == false)
return false;
if (other.getDesiredCount() == null ^ this.getDesiredCount() == null)
return false;
if (other.getDesiredCount() != null && other.getDesiredCount().equals(this.getDesiredCount()) == false)
return false;
if (other.getClientToken() == null ^ this.getClientToken() == null)
return false;
if (other.getClientToken() != null && other.getClientToken().equals(this.getClientToken()) == false)
return false;
if (other.getLaunchType() == null ^ this.getLaunchType() == null)
return false;
if (other.getLaunchType() != null && other.getLaunchType().equals(this.getLaunchType()) == false)
return false;
if (other.getCapacityProviderStrategy() == null ^ this.getCapacityProviderStrategy() == null)
return false;
if (other.getCapacityProviderStrategy() != null && other.getCapacityProviderStrategy().equals(this.getCapacityProviderStrategy()) == false)
return false;
if (other.getPlatformVersion() == null ^ this.getPlatformVersion() == null)
return false;
if (other.getPlatformVersion() != null && other.getPlatformVersion().equals(this.getPlatformVersion()) == false)
return false;
if (other.getRole() == null ^ this.getRole() == null)
return false;
if (other.getRole() != null && other.getRole().equals(this.getRole()) == false)
return false;
if (other.getDeploymentConfiguration() == null ^ this.getDeploymentConfiguration() == null)
return false;
if (other.getDeploymentConfiguration() != null && other.getDeploymentConfiguration().equals(this.getDeploymentConfiguration()) == false)
return false;
if (other.getPlacementConstraints() == null ^ this.getPlacementConstraints() == null)
return false;
if (other.getPlacementConstraints() != null && other.getPlacementConstraints().equals(this.getPlacementConstraints()) == false)
return false;
if (other.getPlacementStrategy() == null ^ this.getPlacementStrategy() == null)
return false;
if (other.getPlacementStrategy() != null && other.getPlacementStrategy().equals(this.getPlacementStrategy()) == false)
return false;
if (other.getNetworkConfiguration() == null ^ this.getNetworkConfiguration() == null)
return false;
if (other.getNetworkConfiguration() != null && other.getNetworkConfiguration().equals(this.getNetworkConfiguration()) == false)
return false;
if (other.getHealthCheckGracePeriodSeconds() == null ^ this.getHealthCheckGracePeriodSeconds() == null)
return false;
if (other.getHealthCheckGracePeriodSeconds() != null
&& other.getHealthCheckGracePeriodSeconds().equals(this.getHealthCheckGracePeriodSeconds()) == false)
return false;
if (other.getSchedulingStrategy() == null ^ this.getSchedulingStrategy() == null)
return false;
if (other.getSchedulingStrategy() != null && other.getSchedulingStrategy().equals(this.getSchedulingStrategy()) == false)
return false;
if (other.getDeploymentController() == null ^ this.getDeploymentController() == null)
return false;
if (other.getDeploymentController() != null && other.getDeploymentController().equals(this.getDeploymentController()) == false)
return false;
if (other.getTags() == null ^ this.getTags() == null)
return false;
if (other.getTags() != null && other.getTags().equals(this.getTags()) == false)
return false;
if (other.getEnableECSManagedTags() == null ^ this.getEnableECSManagedTags() == null)
return false;
if (other.getEnableECSManagedTags() != null && other.getEnableECSManagedTags().equals(this.getEnableECSManagedTags()) == false)
return false;
if (other.getPropagateTags() == null ^ this.getPropagateTags() == null)
return false;
if (other.getPropagateTags() != null && other.getPropagateTags().equals(this.getPropagateTags()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getCluster() == null) ? 0 : getCluster().hashCode());
hashCode = prime * hashCode + ((getServiceName() == null) ? 0 : getServiceName().hashCode());
hashCode = prime * hashCode + ((getTaskDefinition() == null) ? 0 : getTaskDefinition().hashCode());
hashCode = prime * hashCode + ((getLoadBalancers() == null) ? 0 : getLoadBalancers().hashCode());
hashCode = prime * hashCode + ((getServiceRegistries() == null) ? 0 : getServiceRegistries().hashCode());
hashCode = prime * hashCode + ((getDesiredCount() == null) ? 0 : getDesiredCount().hashCode());
hashCode = prime * hashCode + ((getClientToken() == null) ? 0 : getClientToken().hashCode());
hashCode = prime * hashCode + ((getLaunchType() == null) ? 0 : getLaunchType().hashCode());
hashCode = prime * hashCode + ((getCapacityProviderStrategy() == null) ? 0 : getCapacityProviderStrategy().hashCode());
hashCode = prime * hashCode + ((getPlatformVersion() == null) ? 0 : getPlatformVersion().hashCode());
hashCode = prime * hashCode + ((getRole() == null) ? 0 : getRole().hashCode());
hashCode = prime * hashCode + ((getDeploymentConfiguration() == null) ? 0 : getDeploymentConfiguration().hashCode());
hashCode = prime * hashCode + ((getPlacementConstraints() == null) ? 0 : getPlacementConstraints().hashCode());
hashCode = prime * hashCode + ((getPlacementStrategy() == null) ? 0 : getPlacementStrategy().hashCode());
hashCode = prime * hashCode + ((getNetworkConfiguration() == null) ? 0 : getNetworkConfiguration().hashCode());
hashCode = prime * hashCode + ((getHealthCheckGracePeriodSeconds() == null) ? 0 : getHealthCheckGracePeriodSeconds().hashCode());
hashCode = prime * hashCode + ((getSchedulingStrategy() == null) ? 0 : getSchedulingStrategy().hashCode());
hashCode = prime * hashCode + ((getDeploymentController() == null) ? 0 : getDeploymentController().hashCode());
hashCode = prime * hashCode + ((getTags() == null) ? 0 : getTags().hashCode());
hashCode = prime * hashCode + ((getEnableECSManagedTags() == null) ? 0 : getEnableECSManagedTags().hashCode());
hashCode = prime * hashCode + ((getPropagateTags() == null) ? 0 : getPropagateTags().hashCode());
return hashCode;
}
@Override
public CreateServiceRequest clone() {
return (CreateServiceRequest) super.clone();
}
}