/*
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.ecs.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* Optional deployment parameters that control how many tasks run during a deployment and the ordering of stopping and
* starting tasks.
*
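*
* For illustration, a minimal sketch of how this model object is typically populated through its fluent
* <code>with*</code> setters and attached to a service request. The <code>CreateServiceRequest</code> and
* <code>DeploymentCircuitBreaker</code> accessors shown are assumed from this SDK module's usual naming
* convention, and the values are examples only:
*
* <pre>{@code
* DeploymentConfiguration deploymentConfiguration = new DeploymentConfiguration()
*         .withMinimumHealthyPercent(100)  // keep at least the full desiredCount running during the rollout
*         .withMaximumPercent(200)         // allow up to double the desiredCount while new tasks start
*         .withDeploymentCircuitBreaker(new DeploymentCircuitBreaker()
*                 .withEnable(true)
*                 .withRollback(true));
*
* CreateServiceRequest request = new CreateServiceRequest()
*         .withServiceName("my-service")   // hypothetical service name
*         .withDeploymentConfiguration(deploymentConfiguration);
* }</pre>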
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DeploymentConfiguration implements Serializable, Cloneable, StructuredPojo {
/**
*
*
* The deployment circuit breaker can only be used for services using the rolling update (<code>ECS</code>)
* deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed
* state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service
* is rolled back to the last deployment that completed successfully. For more information, see Rolling update in
* the <i>Amazon Elastic Container Service Developer Guide</i>.
*
*/
private DeploymentCircuitBreaker deploymentCircuitBreaker;
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the <code>maximumPercent</code>
* parameter represents an upper limit on the number of your service's tasks that are allowed in the
* <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables you to define the
* deployment batch size. For example, if your service is using the <code>REPLICA</code> service scheduler and has a
* <code>desiredCount</code> of four tasks and a <code>maximumPercent</code> value of 200%, the scheduler may start
* four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are
* available). The default <code>maximumPercent</code> value for a service using the <code>REPLICA</code> service
* scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used
* to define the upper limit on the number of the tasks in the service that remain in the <code>RUNNING</code> state
* while the container instances are in the <code>DRAINING</code> state. If the tasks in the service use the Fargate
* launch type, the maximum percent value is not used, although it is returned when describing your service.
*
*/
private Integer maximumPercent;
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that must
* remain in the <code>RUNNING</code> state during a deployment, as a percentage of the <code>desiredCount</code>
* (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster
* capacity. For example, if your service has a <code>desiredCount</code> of four tasks and a
* <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two existing tasks to free up cluster
* capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their health
* checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum healthy
* percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will wait for
* the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is
* considered healthy when all essential containers within the task have passed their health checks. The amount of
* time the service scheduler can wait for is determined by the container health check settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for the load
* balancer target group health check to return a healthy status before counting the task towards the minimum
* healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for both the
* task to reach a healthy status and the load balancer target group health check to return a healthy status before
* counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default
* value and is used to define the lower limit on the number of the tasks in the service that remain in the
* <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state. If a service is
* using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types and is running
* tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned
* when describing your service.
*
*/
private Integer minimumHealthyPercent;
/**
*
* Information about the CloudWatch alarms.
*
*/
private DeploymentAlarms alarms;
/**
*
*
* The deployment circuit breaker can only be used for services using the rolling update (<code>ECS</code>)
* deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed
* state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service
* is rolled back to the last deployment that completed successfully. For more information, see Rolling update in
* the <i>Amazon Elastic Container Service Developer Guide</i>.
*
*
* @param deploymentCircuitBreaker
*
* The deployment circuit breaker can only be used for services using the rolling update (<code>ECS</code>)
* deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a
* failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails,
* the service is rolled back to the last deployment that completed successfully. For more information, see
* Rolling update in the <i>Amazon Elastic Container Service Developer Guide</i>.
*/
public void setDeploymentCircuitBreaker(DeploymentCircuitBreaker deploymentCircuitBreaker) {
this.deploymentCircuitBreaker = deploymentCircuitBreaker;
}
/**
*
*
* The deployment circuit breaker can only be used for services using the rolling update (<code>ECS</code>)
* deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed
* state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service
* is rolled back to the last deployment that completed successfully. For more information, see Rolling update in
* the <i>Amazon Elastic Container Service Developer Guide</i>.
*
*
* @return The deployment circuit breaker can only be used for services using the rolling update
* (<code>ECS</code>) deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a
* failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails,
* the service is rolled back to the last deployment that completed successfully. For more information, see
* Rolling update in the <i>Amazon Elastic Container Service Developer Guide</i>.
*/
public DeploymentCircuitBreaker getDeploymentCircuitBreaker() {
return this.deploymentCircuitBreaker;
}
/**
*
*
* The deployment circuit breaker can only be used for services using the rolling update (<code>ECS</code>)
* deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed
* state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service
* is rolled back to the last deployment that completed successfully. For more information, see Rolling update in
* the <i>Amazon Elastic Container Service Developer Guide</i>.
*
*
* @param deploymentCircuitBreaker
*
* The deployment circuit breaker can only be used for services using the rolling update (<code>ECS</code>)
* deployment type.
*
* The deployment circuit breaker determines whether a service deployment will fail if the service can't
* reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a
* failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails,
* the service is rolled back to the last deployment that completed successfully. For more information, see
* Rolling update in the <i>Amazon Elastic Container Service Developer Guide</i>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DeploymentConfiguration withDeploymentCircuitBreaker(DeploymentCircuitBreaker deploymentCircuitBreaker) {
setDeploymentCircuitBreaker(deploymentCircuitBreaker);
return this;
}
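/*
 * Illustration only (not part of the generated model): a minimal sketch of enabling the deployment circuit
 * breaker with automatic rollback and applying it to a service update. The DeploymentCircuitBreaker accessors
 * (withEnable, withRollback) and UpdateServiceRequest.withDeploymentConfiguration are assumed from this SDK
 * module's fluent-setter convention; the cluster and service names are hypothetical.
 *
 * DeploymentConfiguration config = new DeploymentConfiguration()
 *         .withDeploymentCircuitBreaker(new DeploymentCircuitBreaker()
 *                 .withEnable(true)      // fail the deployment if it can't reach a steady state
 *                 .withRollback(true));  // roll back to the last successful deployment on failure
 *
 * UpdateServiceRequest request = new UpdateServiceRequest()
 *         .withCluster("my-cluster")
 *         .withService("my-service")
 *         .withDeploymentConfiguration(config);
 */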
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the <code>maximumPercent</code>
* parameter represents an upper limit on the number of your service's tasks that are allowed in the
* <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables you to define the
* deployment batch size. For example, if your service is using the <code>REPLICA</code> service scheduler and has a
* <code>desiredCount</code> of four tasks and a <code>maximumPercent</code> value of 200%, the scheduler may start
* four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are
* available). The default <code>maximumPercent</code> value for a service using the <code>REPLICA</code> service
* scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used
* to define the upper limit on the number of the tasks in the service that remain in the <code>RUNNING</code> state
* while the container instances are in the <code>DRAINING</code> state. If the tasks in the service use the Fargate
* launch type, the maximum percent value is not used, although it is returned when describing your service.
*
*
* @param maximumPercent
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>maximumPercent</code> parameter represents an upper limit on the number of your service's tasks that
* are allowed in the <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a percentage
* of the <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables you to
* define the deployment batch size. For example, if your service is using the <code>REPLICA</code> service
* scheduler and has a <code>desiredCount</code> of four tasks and a <code>maximumPercent</code> value of
* 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the
* cluster resources required to do this are available). The default <code>maximumPercent</code> value for a
* service using the <code>REPLICA</code> service scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment
* types and tasks that use the EC2 launch type, the maximum percent value is set to the default value
* and is used to define the upper limit on the number of the tasks in the service that remain in the
* <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state. If the
* tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is
* returned when describing your service.
*/
public void setMaximumPercent(Integer maximumPercent) {
this.maximumPercent = maximumPercent;
}
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the <code>maximumPercent</code>
* parameter represents an upper limit on the number of your service's tasks that are allowed in the
* <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables you to define the
* deployment batch size. For example, if your service is using the <code>REPLICA</code> service scheduler and has a
* <code>desiredCount</code> of four tasks and a <code>maximumPercent</code> value of 200%, the scheduler may start
* four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are
* available). The default <code>maximumPercent</code> value for a service using the <code>REPLICA</code> service
* scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used
* to define the upper limit on the number of the tasks in the service that remain in the <code>RUNNING</code> state
* while the container instances are in the <code>DRAINING</code> state. If the tasks in the service use the Fargate
* launch type, the maximum percent value is not used, although it is returned when describing your service.
*
*
* @return If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>maximumPercent</code> parameter represents an upper limit on the number of your service's tasks
* that are allowed in the <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a
* percentage of the <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables
* you to define the deployment batch size. For example, if your service is using the <code>REPLICA</code>
* service scheduler and has a <code>desiredCount</code> of four tasks and a <code>maximumPercent</code>
* value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that
* the cluster resources required to do this are available). The default <code>maximumPercent</code> value
* for a service using the <code>REPLICA</code> service scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code>
* deployment types and tasks that use the EC2 launch type, the maximum percent value is set to the
* default value and is used to define the upper limit on the number of the tasks in the service that remain
* in the <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state.
* If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although
* it is returned when describing your service.
*/
public Integer getMaximumPercent() {
return this.maximumPercent;
}
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the <code>maximumPercent</code>
* parameter represents an upper limit on the number of your service's tasks that are allowed in the
* <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables you to define the
* deployment batch size. For example, if your service is using the <code>REPLICA</code> service scheduler and has a
* <code>desiredCount</code> of four tasks and a <code>maximumPercent</code> value of 200%, the scheduler may start
* four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are
* available). The default <code>maximumPercent</code> value for a service using the <code>REPLICA</code> service
* scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used
* to define the upper limit on the number of the tasks in the service that remain in the <code>RUNNING</code> state
* while the container instances are in the <code>DRAINING</code> state. If the tasks in the service use the Fargate
* launch type, the maximum percent value is not used, although it is returned when describing your service.
*
*
* @param maximumPercent
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>maximumPercent</code> parameter represents an upper limit on the number of your service's tasks that
* are allowed in the <code>RUNNING</code> or <code>PENDING</code> state during a deployment, as a percentage
* of the <code>desiredCount</code> (rounded down to the nearest integer). This parameter enables you to
* define the deployment batch size. For example, if your service is using the <code>REPLICA</code> service
* scheduler and has a <code>desiredCount</code> of four tasks and a <code>maximumPercent</code> value of
* 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the
* cluster resources required to do this are available). The default <code>maximumPercent</code> value for a
* service using the <code>REPLICA</code> service scheduler is 200%.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment
* types and tasks that use the EC2 launch type, the maximum percent value is set to the default value
* and is used to define the upper limit on the number of the tasks in the service that remain in the
* <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state. If the
* tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is
* returned when describing your service.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DeploymentConfiguration withMaximumPercent(Integer maximumPercent) {
setMaximumPercent(maximumPercent);
return this;
}
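/*
 * Illustration only: the upper bound that maximumPercent implies during a rolling (ECS) deployment, mirroring
 * the documented example above (desiredCount of four, maximumPercent of 200%). The rounding direction (down)
 * matches the description; the desiredCount value here is made up for the sketch.
 *
 * int desiredCount = 4;
 * DeploymentConfiguration config = new DeploymentConfiguration().withMaximumPercent(200);
 *
 * // Upper limit on tasks in RUNNING or PENDING during the deployment: floor(4 * 200 / 100) = 8.
 * int maxRunningOrPending = (int) Math.floor(desiredCount * config.getMaximumPercent() / 100.0);
 */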
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that must
* remain in the <code>RUNNING</code> state during a deployment, as a percentage of the <code>desiredCount</code>
* (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster
* capacity. For example, if your service has a <code>desiredCount</code> of four tasks and a
* <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two existing tasks to free up cluster
* capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their health
* checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum healthy
* percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will wait for
* the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is
* considered healthy when all essential containers within the task have passed their health checks. The amount of
* time the service scheduler can wait for is determined by the container health check settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for the load
* balancer target group health check to return a healthy status before counting the task towards the minimum
* healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for both the
* task to reach a healthy status and the load balancer target group health check to return a healthy status before
* counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default
* value and is used to define the lower limit on the number of the tasks in the service that remain in the
* <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state. If a service is
* using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types and is running
* tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned
* when describing your service.
*
*
* @param minimumHealthyPercent
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that
* must remain in the <code>RUNNING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded up to the nearest integer). This parameter enables you to deploy
* without using additional cluster capacity. For example, if your service has a <code>desiredCount</code> of
* four tasks and a <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two existing
* tasks to free up cluster capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their
* health checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum
* healthy percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will
* wait for the task to reach a healthy status before counting it towards the minimum healthy percent total.
* A task is considered healthy when all essential containers within the task have passed their health
* checks. The amount of time the service scheduler can wait for is determined by the container health check
* settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for the
* load balancer target group health check to return a healthy status before counting the task towards the
* minimum healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for both
* the task to reach a healthy status and the load balancer target group health check to return a healthy
* status before counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment
* types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set
* to the default value and is used to define the lower limit on the number of the tasks in the service that
* remain in the <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code>
* state. If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code>
* deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value
* is not used, although it is returned when describing your service.
*/
public void setMinimumHealthyPercent(Integer minimumHealthyPercent) {
this.minimumHealthyPercent = minimumHealthyPercent;
}
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that must
* remain in the <code>RUNNING</code> state during a deployment, as a percentage of the <code>desiredCount</code>
* (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster
* capacity. For example, if your service has a <code>desiredCount</code> of four tasks and a
* <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two existing tasks to free up cluster
* capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their health
* checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum healthy
* percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will wait for
* the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is
* considered healthy when all essential containers within the task have passed their health checks. The amount of
* time the service scheduler can wait for is determined by the container health check settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for the load
* balancer target group health check to return a healthy status before counting the task towards the minimum
* healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for both the
* task to reach a healthy status and the load balancer target group health check to return a healthy status before
* counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default
* value and is used to define the lower limit on the number of the tasks in the service that remain in the
* <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state. If a service is
* using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types and is running
* tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned
* when describing your service.
*
*
* @return If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that
* must remain in the <code>RUNNING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded up to the nearest integer). This parameter enables you to deploy
* without using additional cluster capacity. For example, if your service has a <code>desiredCount</code>
* of four tasks and a <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two
* existing tasks to free up cluster capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their
* health checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum
* healthy percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will
* wait for the task to reach a healthy status before counting it towards the minimum healthy percent total.
* A task is considered healthy when all essential containers within the task have passed their health
* checks. The amount of time the service scheduler can wait for is determined by the container health check
* settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for
* the load balancer target group health check to return a healthy status before counting the task towards
* the minimum healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for
* both the task to reach a healthy status and the load balancer target group health check to return a
* healthy status before counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code>
* deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent
* value is set to the default value and is used to define the lower limit on the number of the tasks in the
* service that remain in the <code>RUNNING</code> state while the container instances are in the
* <code>DRAINING</code> state. If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or
* <code>EXTERNAL</code> deployment types and is running tasks that use the Fargate launch type, the minimum
* healthy percent value is not used, although it is returned when describing your service.
*/
public Integer getMinimumHealthyPercent() {
return this.minimumHealthyPercent;
}
/**
*
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that must
* remain in the <code>RUNNING</code> state during a deployment, as a percentage of the <code>desiredCount</code>
* (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster
* capacity. For example, if your service has a <code>desiredCount</code> of four tasks and a
* <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two existing tasks to free up cluster
* capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their health
* checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum healthy
* percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will wait for
* the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is
* considered healthy when all essential containers within the task have passed their health checks. The amount of
* time the service scheduler can wait for is determined by the container health check settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for the load
* balancer target group health check to return a healthy status before counting the task towards the minimum
* healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for both the
* task to reach a healthy status and the load balancer target group health check to return a healthy status before
* counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types
* and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default
* value and is used to define the lower limit on the number of the tasks in the service that remain in the
* <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code> state. If a service is
* using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment types and is running
* tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned
* when describing your service.
*
*
* @param minimumHealthyPercent
* If a service is using the rolling update (<code>ECS</code>) deployment type, the
* <code>minimumHealthyPercent</code> represents a lower limit on the number of your service's tasks that
* must remain in the <code>RUNNING</code> state during a deployment, as a percentage of the
* <code>desiredCount</code> (rounded up to the nearest integer). This parameter enables you to deploy
* without using additional cluster capacity. For example, if your service has a <code>desiredCount</code> of
* four tasks and a <code>minimumHealthyPercent</code> of 50%, the service scheduler may stop two existing
* tasks to free up cluster capacity before starting two new tasks.
*
* For services that do not use a load balancer, the following should be noted:
*
* - A service is considered healthy if all essential containers within the tasks in the service pass their
* health checks.
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for 40
* seconds after a task reaches a <code>RUNNING</code> state before the task is counted towards the minimum
* healthy percent total.
*
* - If a task has one or more essential containers with a health check defined, the service scheduler will
* wait for the task to reach a healthy status before counting it towards the minimum healthy percent total.
* A task is considered healthy when all essential containers within the task have passed their health
* checks. The amount of time the service scheduler can wait for is determined by the container health check
* settings.
*
* For services that do use a load balancer, the following should be noted:
*
* - If a task has no essential containers with a health check defined, the service scheduler will wait for the
* load balancer target group health check to return a healthy status before counting the task towards the
* minimum healthy percent total.
*
* - If a task has an essential container with a health check defined, the service scheduler will wait for both
* the task to reach a healthy status and the load balancer target group health check to return a healthy
* status before counting the task towards the minimum healthy percent total.
*
* If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code> deployment
* types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set
* to the default value and is used to define the lower limit on the number of the tasks in the service that
* remain in the <code>RUNNING</code> state while the container instances are in the <code>DRAINING</code>
* state. If a service is using either the blue/green (<code>CODE_DEPLOY</code>) or <code>EXTERNAL</code>
* deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value
* is not used, although it is returned when describing your service.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DeploymentConfiguration withMinimumHealthyPercent(Integer minimumHealthyPercent) {
setMinimumHealthyPercent(minimumHealthyPercent);
return this;
}
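/*
 * Illustration only: the lower bound that minimumHealthyPercent implies during a rolling (ECS) deployment,
 * matching the documented example above (desiredCount of four, minimumHealthyPercent of 50%). Note that this
 * bound rounds up, whereas maximumPercent rounds down; the desiredCount value here is made up for the sketch.
 *
 * int desiredCount = 4;
 * DeploymentConfiguration config = new DeploymentConfiguration()
 *         .withMinimumHealthyPercent(50)
 *         .withMaximumPercent(200);
 *
 * // At least ceil(4 * 50 / 100) = 2 tasks must stay RUNNING, so the scheduler may stop two tasks to free
 * // cluster capacity before starting their replacements.
 * int minHealthyTasks = (int) Math.ceil(desiredCount * config.getMinimumHealthyPercent() / 100.0);
 */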
/**
*
* Information about the CloudWatch alarms.
*
*
* @param alarms
* Information about the CloudWatch alarms.
*/
public void setAlarms(DeploymentAlarms alarms) {
this.alarms = alarms;
}
/**
*
* Information about the CloudWatch alarms.
*
*
* @return Information about the CloudWatch alarms.
*/
public DeploymentAlarms getAlarms() {
return this.alarms;
}
/**
*
* Information about the CloudWatch alarms.
*
*
* @param alarms
* Information about the CloudWatch alarms.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DeploymentConfiguration withAlarms(DeploymentAlarms alarms) {
setAlarms(alarms);
return this;
}
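/*
 * Illustration only: wiring CloudWatch alarms into the deployment configuration so a deployment is treated as
 * failed (and optionally rolled back) when an alarm goes into ALARM state. The DeploymentAlarms accessors shown
 * (withAlarmNames, withEnable, withRollback) are assumed from this SDK module's fluent-setter convention, and
 * the alarm name is hypothetical.
 *
 * DeploymentConfiguration config = new DeploymentConfiguration()
 *         .withAlarms(new DeploymentAlarms()
 *                 .withAlarmNames("my-service-high-5xx")
 *                 .withEnable(true)
 *                 .withRollback(true));
 */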
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getDeploymentCircuitBreaker() != null)
sb.append("DeploymentCircuitBreaker: ").append(getDeploymentCircuitBreaker()).append(",");
if (getMaximumPercent() != null)
sb.append("MaximumPercent: ").append(getMaximumPercent()).append(",");
if (getMinimumHealthyPercent() != null)
sb.append("MinimumHealthyPercent: ").append(getMinimumHealthyPercent()).append(",");
if (getAlarms() != null)
sb.append("Alarms: ").append(getAlarms());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof DeploymentConfiguration == false)
return false;
DeploymentConfiguration other = (DeploymentConfiguration) obj;
if (other.getDeploymentCircuitBreaker() == null ^ this.getDeploymentCircuitBreaker() == null)
return false;
if (other.getDeploymentCircuitBreaker() != null && other.getDeploymentCircuitBreaker().equals(this.getDeploymentCircuitBreaker()) == false)
return false;
if (other.getMaximumPercent() == null ^ this.getMaximumPercent() == null)
return false;
if (other.getMaximumPercent() != null && other.getMaximumPercent().equals(this.getMaximumPercent()) == false)
return false;
if (other.getMinimumHealthyPercent() == null ^ this.getMinimumHealthyPercent() == null)
return false;
if (other.getMinimumHealthyPercent() != null && other.getMinimumHealthyPercent().equals(this.getMinimumHealthyPercent()) == false)
return false;
if (other.getAlarms() == null ^ this.getAlarms() == null)
return false;
if (other.getAlarms() != null && other.getAlarms().equals(this.getAlarms()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getDeploymentCircuitBreaker() == null) ? 0 : getDeploymentCircuitBreaker().hashCode());
hashCode = prime * hashCode + ((getMaximumPercent() == null) ? 0 : getMaximumPercent().hashCode());
hashCode = prime * hashCode + ((getMinimumHealthyPercent() == null) ? 0 : getMinimumHealthyPercent().hashCode());
hashCode = prime * hashCode + ((getAlarms() == null) ? 0 : getAlarms().hashCode());
return hashCode;
}
@Override
public DeploymentConfiguration clone() {
try {
return (DeploymentConfiguration) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.ecs.model.transform.DeploymentConfigurationMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}