
/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.autoscaling;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import javax.annotation.Generated;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.client.builder.AdvancedConfig;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder;
import com.amazonaws.services.autoscaling.waiters.AmazonAutoScalingWaiters;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.transform.*;
/**
* Client for accessing Auto Scaling. All service calls made using this client are blocking, and will not return until
* the service call completes.
*
* Amazon EC2 Auto Scaling
*
* Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances based on user-defined scaling
* policies, scheduled actions, and health checks.
*
*
* For more information, see the Amazon EC2 Auto
* Scaling User Guide and the Amazon
* EC2 Auto Scaling API Reference.
*
*/
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonAutoScalingClient extends AmazonWebServiceClient implements AmazonAutoScaling {
/** Provider for AWS credentials. */
private final AWSCredentialsProvider awsCredentialsProvider;
private static final Log log = LogFactory.getLog(AmazonAutoScaling.class);
/** Default signing name for the service. */
private static final String DEFAULT_SIGNING_NAME = "autoscaling";
private volatile AmazonAutoScalingWaiters waiters;
/** Client configuration factory providing ClientConfigurations tailored to this client */
protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();
private final AdvancedConfig advancedConfig;
/**
* Map of exception unmarshallers for all modeled exceptions
*/
private final Map<String, Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallersMap = new HashMap<String, Unmarshaller<AmazonServiceException, Node>>();
/**
* List of exception unmarshallers for all modeled exceptions. Even though exceptionUnmarshallers is not used by
* the client itself, it is kept because client subclasses used it directly. Using this list can have a
* performance impact.
*/
protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers = new ArrayList<Unmarshaller<AmazonServiceException, Node>>();
protected Unmarshaller<AmazonServiceException, Node> defaultUnmarshaller;
/**
* Constructs a new client to invoke service methods on Auto Scaling. A credentials provider chain will be used that
* searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @see DefaultAWSCredentialsProviderChain
* @deprecated use {@link AmazonAutoScalingClientBuilder#defaultClient()}
*/
@Deprecated
public AmazonAutoScalingClient() {
this(DefaultAWSCredentialsProviderChain.getInstance(), configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on Auto Scaling. A credentials provider chain will be used that
* searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
*
* @see DefaultAWSCredentialsProviderChain
* @deprecated use {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonAutoScalingClient(ClientConfiguration clientConfiguration) {
this(DefaultAWSCredentialsProviderChain.getInstance(), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentials
* The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} for example:
* {@code AmazonAutoScalingClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(awsCredentials)).build();}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentials awsCredentials) {
this(awsCredentials, configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials and
* client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentials
* The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
super(clientConfiguration);
this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
this.advancedConfig = AdvancedConfig.EMPTY;
init();
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials
* provider.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider) {
this(awsCredentialsProvider, configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials
* provider and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
this(awsCredentialsProvider, clientConfiguration, null);
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials
* provider, client configuration options, and request metric collector.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
* @param requestMetricCollector
* optional request metric collector
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)} and
* {@link AmazonAutoScalingClientBuilder#withMetricsCollector(RequestMetricCollector)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
super(clientConfiguration, requestMetricCollector);
this.awsCredentialsProvider = awsCredentialsProvider;
this.advancedConfig = AdvancedConfig.EMPTY;
init();
}
public static AmazonAutoScalingClientBuilder builder() {
return AmazonAutoScalingClientBuilder.standard();
}
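/*
 * Usage sketch (illustrative, not part of the generated source): constructing the client with the
 * recommended builder rather than the deprecated constructors. The region shown is an example value.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard()
 *           .withRegion("us-east-1")
 *           .build();
 */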
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonAutoScalingClient(AwsSyncClientParams clientParams) {
this(clientParams, false);
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonAutoScalingClient(AwsSyncClientParams clientParams, boolean endpointDiscoveryEnabled) {
super(clientParams);
this.awsCredentialsProvider = clientParams.getCredentialsProvider();
this.advancedConfig = clientParams.getAdvancedConfig();
init();
}
private void init() {
if (exceptionUnmarshallersMap.get("InstanceRefreshInProgress") == null) {
exceptionUnmarshallersMap.put("InstanceRefreshInProgress", new InstanceRefreshInProgressExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new InstanceRefreshInProgressExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("ScalingActivityInProgress") == null) {
exceptionUnmarshallersMap.put("ScalingActivityInProgress", new ScalingActivityInProgressExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new ScalingActivityInProgressExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("InvalidNextToken") == null) {
exceptionUnmarshallersMap.put("InvalidNextToken", new InvalidNextTokenExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new InvalidNextTokenExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("LimitExceeded") == null) {
exceptionUnmarshallersMap.put("LimitExceeded", new LimitExceededExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("AlreadyExists") == null) {
exceptionUnmarshallersMap.put("AlreadyExists", new AlreadyExistsExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("ActiveInstanceRefreshNotFound") == null) {
exceptionUnmarshallersMap.put("ActiveInstanceRefreshNotFound", new ActiveInstanceRefreshNotFoundExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new ActiveInstanceRefreshNotFoundExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("ResourceContention") == null) {
exceptionUnmarshallersMap.put("ResourceContention", new ResourceContentionExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new ResourceContentionExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("ServiceLinkedRoleFailure") == null) {
exceptionUnmarshallersMap.put("ServiceLinkedRoleFailure", new ServiceLinkedRoleFailureExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new ServiceLinkedRoleFailureExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("ResourceInUse") == null) {
exceptionUnmarshallersMap.put("ResourceInUse", new ResourceInUseExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new ResourceInUseExceptionUnmarshaller());
if (exceptionUnmarshallersMap.get("IrreversibleInstanceRefresh") == null) {
exceptionUnmarshallersMap.put("IrreversibleInstanceRefresh", new IrreversibleInstanceRefreshExceptionUnmarshaller());
}
exceptionUnmarshallers.add(new IrreversibleInstanceRefreshExceptionUnmarshaller());
defaultUnmarshaller = new StandardErrorUnmarshaller(com.amazonaws.services.autoscaling.model.AmazonAutoScalingException.class);
exceptionUnmarshallers.add(new StandardErrorUnmarshaller(com.amazonaws.services.autoscaling.model.AmazonAutoScalingException.class));
setServiceNameIntern(DEFAULT_SIGNING_NAME);
setEndpointPrefix(ENDPOINT_PREFIX);
// calling this.setEndPoint(...) will also modify the signer accordingly
this.setEndpoint("https://autoscaling.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/autoscaling/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/autoscaling/request.handler2s"));
requestHandler2s.addAll(chainFactory.getGlobalHandlers());
}
/**
*
* Attaches one or more EC2 instances to the specified Auto Scaling group.
*
*
* When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of
* instances being attached. If the number of instances being attached plus the desired capacity of the group
* exceeds the maximum size of the group, the operation fails.
*
*
* If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with
* the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also
* registered with the target groups.
*
*
* For more information, see Detach
* or attach instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param attachInstancesRequest
* @return Result of the AttachInstances operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachInstances
* @see AWS
* API Documentation
*/
@Override
public AttachInstancesResult attachInstances(AttachInstancesRequest request) {
request = beforeClientExecution(request);
return executeAttachInstances(request);
}
@SdkInternalApi
final AttachInstancesResult executeAttachInstances(AttachInstancesRequest attachInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(attachInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachInstancesRequest> request = null;
Response<AttachInstancesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachInstancesRequestMarshaller().marshall(super.beforeMarshalling(attachInstancesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachInstances");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachInstancesResult> responseHandler = new StaxResponseHandler<AttachInstancesResult>(
new AttachInstancesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
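/*
 * Usage sketch (illustrative, not part of the generated source): attaching an instance to a group.
 * Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the group name and
 * instance ID are example values.
 *
 *   autoScaling.attachInstances(new AttachInstancesRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withInstanceIds("i-0123456789abcdef0"));
 */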
/**
*
*
* This API operation is superseded by AttachTrafficSources, which can attach multiple traffic source types.
* We recommend using AttachTrafficSources to simplify how you manage traffic sources. However, we
* continue to support AttachLoadBalancerTargetGroups. You can use both the original
* AttachLoadBalancerTargetGroups API operation and AttachTrafficSources on the same Auto
* Scaling group.
*
*
*
* Attaches one or more target groups to the specified Auto Scaling group.
*
*
* This operation is used with the following load balancer types:
*
*
* -
*
* Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS.
*
*
* -
*
* Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP.
*
*
* -
*
* Gateway Load Balancer - Operates at the network layer (layer 3).
*
*
*
*
* To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To
* detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.
*
*
* This operation is additive and does not detach existing target groups or Classic Load Balancers from the Auto
* Scaling group.
*
*
* For more information, see Use Elastic Load
* Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto
* Scaling User Guide.
*
*
* @param attachLoadBalancerTargetGroupsRequest
* @return Result of the AttachLoadBalancerTargetGroups operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachLoadBalancerTargetGroups
* @see AWS API Documentation
*/
@Override
public AttachLoadBalancerTargetGroupsResult attachLoadBalancerTargetGroups(AttachLoadBalancerTargetGroupsRequest request) {
request = beforeClientExecution(request);
return executeAttachLoadBalancerTargetGroups(request);
}
@SdkInternalApi
final AttachLoadBalancerTargetGroupsResult executeAttachLoadBalancerTargetGroups(AttachLoadBalancerTargetGroupsRequest attachLoadBalancerTargetGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(attachLoadBalancerTargetGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachLoadBalancerTargetGroupsRequest> request = null;
Response<AttachLoadBalancerTargetGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachLoadBalancerTargetGroupsRequestMarshaller().marshall(super.beforeMarshalling(attachLoadBalancerTargetGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachLoadBalancerTargetGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachLoadBalancerTargetGroupsResult> responseHandler = new StaxResponseHandler<AttachLoadBalancerTargetGroupsResult>(
new AttachLoadBalancerTargetGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
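/*
 * Usage sketch (illustrative, not part of the generated source): attaching a target group by ARN.
 * Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the group name and ARN
 * are example values.
 *
 *   autoScaling.attachLoadBalancerTargetGroups(new AttachLoadBalancerTargetGroupsRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withTargetGroupARNs("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg/1234567890123456"));
 */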
/**
*
*
* This API operation is superseded by AttachTrafficSources, which can attach multiple traffic source types.
* We recommend using AttachTrafficSources to simplify how you manage traffic sources. However, we
* continue to support AttachLoadBalancers. You can use both the original
* AttachLoadBalancers API operation and AttachTrafficSources on the same Auto Scaling
* group.
*
*
*
* Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling
* registers the running instances with these Classic Load Balancers.
*
*
* To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach a
* load balancer from the Auto Scaling group, call the DetachLoadBalancers API.
*
*
* This operation is additive and does not detach existing Classic Load Balancers or target groups from the Auto
* Scaling group.
*
*
* For more information, see Use Elastic Load
* Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto
* Scaling User Guide.
*
*
* @param attachLoadBalancersRequest
* @return Result of the AttachLoadBalancers operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachLoadBalancers
* @see AWS API Documentation
*/
@Override
public AttachLoadBalancersResult attachLoadBalancers(AttachLoadBalancersRequest request) {
request = beforeClientExecution(request);
return executeAttachLoadBalancers(request);
}
@SdkInternalApi
final AttachLoadBalancersResult executeAttachLoadBalancers(AttachLoadBalancersRequest attachLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(attachLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachLoadBalancersRequest> request = null;
Response<AttachLoadBalancersResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachLoadBalancersRequestMarshaller().marshall(super.beforeMarshalling(attachLoadBalancersRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachLoadBalancers");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachLoadBalancersResult> responseHandler = new StaxResponseHandler<AttachLoadBalancersResult>(
new AttachLoadBalancersResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public AttachLoadBalancersResult attachLoadBalancers() {
return attachLoadBalancers(new AttachLoadBalancersRequest());
}
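/*
 * Usage sketch (illustrative, not part of the generated source): attaching a Classic Load Balancer
 * by name. Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the names are
 * example values.
 *
 *   autoScaling.attachLoadBalancers(new AttachLoadBalancersRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withLoadBalancerNames("my-classic-elb"));
 */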
/**
*
* Attaches one or more traffic sources to the specified Auto Scaling group.
*
*
* You can use any of the following as traffic sources for an Auto Scaling group:
*
*
* -
*
* Application Load Balancer
*
*
* -
*
* Classic Load Balancer
*
*
* -
*
* Gateway Load Balancer
*
*
* -
*
* Network Load Balancer
*
*
* -
*
* VPC Lattice
*
*
*
*
* This operation is additive and does not detach existing traffic sources from the Auto Scaling group.
*
*
* After the operation completes, use the DescribeTrafficSources API to return details about the state of the
* attachments between traffic sources and your Auto Scaling group. To detach a traffic source from the Auto Scaling
* group, call the DetachTrafficSources API.
*
*
* @param attachTrafficSourcesRequest
* @return Result of the AttachTrafficSources operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachTrafficSources
* @see AWS API Documentation
*/
@Override
public AttachTrafficSourcesResult attachTrafficSources(AttachTrafficSourcesRequest request) {
request = beforeClientExecution(request);
return executeAttachTrafficSources(request);
}
@SdkInternalApi
final AttachTrafficSourcesResult executeAttachTrafficSources(AttachTrafficSourcesRequest attachTrafficSourcesRequest) {
ExecutionContext executionContext = createExecutionContext(attachTrafficSourcesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachTrafficSourcesRequest> request = null;
Response<AttachTrafficSourcesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachTrafficSourcesRequestMarshaller().marshall(super.beforeMarshalling(attachTrafficSourcesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachTrafficSources");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachTrafficSourcesResult> responseHandler = new StaxResponseHandler<AttachTrafficSourcesResult>(
new AttachTrafficSourcesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
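/*
 * Usage sketch (illustrative, not part of the generated source): attaching a target group as a
 * traffic source. Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder and that
 * TrafficSourceIdentifier with Identifier/Type setters is available in the model; the ARN and type
 * are example values.
 *
 *   autoScaling.attachTrafficSources(new AttachTrafficSourcesRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withTrafficSources(new TrafficSourceIdentifier()
 *                   .withIdentifier("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg/1234567890123456")
 *                   .withType("elbv2")));
 */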
/**
*
* Deletes one or more scheduled actions for the specified Auto Scaling group.
*
*
* @param batchDeleteScheduledActionRequest
* @return Result of the BatchDeleteScheduledAction operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.BatchDeleteScheduledAction
* @see AWS API Documentation
*/
@Override
public BatchDeleteScheduledActionResult batchDeleteScheduledAction(BatchDeleteScheduledActionRequest request) {
request = beforeClientExecution(request);
return executeBatchDeleteScheduledAction(request);
}
@SdkInternalApi
final BatchDeleteScheduledActionResult executeBatchDeleteScheduledAction(BatchDeleteScheduledActionRequest batchDeleteScheduledActionRequest) {
ExecutionContext executionContext = createExecutionContext(batchDeleteScheduledActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchDeleteScheduledActionRequest> request = null;
Response<BatchDeleteScheduledActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchDeleteScheduledActionRequestMarshaller().marshall(super.beforeMarshalling(batchDeleteScheduledActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchDeleteScheduledAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<BatchDeleteScheduledActionResult> responseHandler = new StaxResponseHandler<BatchDeleteScheduledActionResult>(
new BatchDeleteScheduledActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
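/*
 * Usage sketch (illustrative, not part of the generated source): deleting two scheduled actions by
 * name. Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the names are
 * example values.
 *
 *   autoScaling.batchDeleteScheduledAction(new BatchDeleteScheduledActionRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withScheduledActionNames("scale-out-weekdays", "scale-in-weekends"));
 */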
/**
*
* Creates or updates one or more scheduled scaling actions for an Auto Scaling group.
*
*
* @param batchPutScheduledUpdateGroupActionRequest
* @return Result of the BatchPutScheduledUpdateGroupAction operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.BatchPutScheduledUpdateGroupAction
* @see AWS API Documentation
*/
@Override
public BatchPutScheduledUpdateGroupActionResult batchPutScheduledUpdateGroupAction(BatchPutScheduledUpdateGroupActionRequest request) {
request = beforeClientExecution(request);
return executeBatchPutScheduledUpdateGroupAction(request);
}
@SdkInternalApi
final BatchPutScheduledUpdateGroupActionResult executeBatchPutScheduledUpdateGroupAction(
BatchPutScheduledUpdateGroupActionRequest batchPutScheduledUpdateGroupActionRequest) {
ExecutionContext executionContext = createExecutionContext(batchPutScheduledUpdateGroupActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchPutScheduledUpdateGroupActionRequest> request = null;
Response<BatchPutScheduledUpdateGroupActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchPutScheduledUpdateGroupActionRequestMarshaller()
.marshall(super.beforeMarshalling(batchPutScheduledUpdateGroupActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchPutScheduledUpdateGroupAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<BatchPutScheduledUpdateGroupActionResult> responseHandler = new StaxResponseHandler<BatchPutScheduledUpdateGroupActionResult>(
new BatchPutScheduledUpdateGroupActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
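/*
 * Usage sketch (illustrative, not part of the generated source): creating a recurring scheduled
 * action. Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the action name,
 * recurrence expression, and sizes are example values.
 *
 *   autoScaling.batchPutScheduledUpdateGroupAction(new BatchPutScheduledUpdateGroupActionRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withScheduledUpdateGroupActions(new ScheduledUpdateGroupActionRequest()
 *                   .withScheduledActionName("scale-out-weekdays")
 *                   .withRecurrence("0 9 * * MON-FRI")
 *                   .withMinSize(2)
 *                   .withMaxSize(10)
 *                   .withDesiredCapacity(4)));
 */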
/**
*
* Cancels an instance refresh or rollback that is in progress. If an instance refresh or rollback is not in
* progress, an ActiveInstanceRefreshNotFound error occurs.
*
*
* This operation is part of the instance refresh
* feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you
* make configuration changes.
*
*
* When you cancel an instance refresh, this does not roll back any changes that it made. Use the
* RollbackInstanceRefresh API to roll back instead.
*
*
* @param cancelInstanceRefreshRequest
* @return Result of the CancelInstanceRefresh operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ActiveInstanceRefreshNotFoundException
* The request failed because an active instance refresh or rollback for the specified Auto Scaling group
* was not found.
* @sample AmazonAutoScaling.CancelInstanceRefresh
* @see AWS API Documentation
*/
@Override
public CancelInstanceRefreshResult cancelInstanceRefresh(CancelInstanceRefreshRequest request) {
request = beforeClientExecution(request);
return executeCancelInstanceRefresh(request);
}
@SdkInternalApi
final CancelInstanceRefreshResult executeCancelInstanceRefresh(CancelInstanceRefreshRequest cancelInstanceRefreshRequest) {
ExecutionContext executionContext = createExecutionContext(cancelInstanceRefreshRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CancelInstanceRefreshRequest> request = null;
Response<CancelInstanceRefreshResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CancelInstanceRefreshRequestMarshaller().marshall(super.beforeMarshalling(cancelInstanceRefreshRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CancelInstanceRefresh");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CancelInstanceRefreshResult> responseHandler = new StaxResponseHandler<CancelInstanceRefreshResult>(
new CancelInstanceRefreshResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
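/*
 * Usage sketch (illustrative, not part of the generated source): cancelling the in-progress
 * instance refresh for a group. Assumes autoScaling is a client built via
 * AmazonAutoScalingClientBuilder; the group name is an example value.
 *
 *   autoScaling.cancelInstanceRefresh(new CancelInstanceRefreshRequest()
 *           .withAutoScalingGroupName("my-asg"));
 */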
/**
*
* Completes the lifecycle action for the specified token or instance with the specified result.
*
*
* This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
*
*
* -
*
* (Optional) Create a launch template or launch configuration with a user data script that runs while an instance
* is in a wait state due to a lifecycle hook.
*
*
* -
*
* (Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when
* an instance is put into a wait state due to a lifecycle hook.
*
*
* -
*
* (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an
* Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
*
*
* -
*
* Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
*
*
* -
*
* If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.
*
*
* -
*
* If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API
* call.
*
*
*
*
* For more information, see Complete a lifecycle
* action in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param completeLifecycleActionRequest
* @return Result of the CompleteLifecycleAction operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.CompleteLifecycleAction
* @see AWS API Documentation
*/
@Override
public CompleteLifecycleActionResult completeLifecycleAction(CompleteLifecycleActionRequest request) {
request = beforeClientExecution(request);
return executeCompleteLifecycleAction(request);
}
@SdkInternalApi
final CompleteLifecycleActionResult executeCompleteLifecycleAction(CompleteLifecycleActionRequest completeLifecycleActionRequest) {
ExecutionContext executionContext = createExecutionContext(completeLifecycleActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CompleteLifecycleActionRequest> request = null;
Response<CompleteLifecycleActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CompleteLifecycleActionRequestMarshaller().marshall(super.beforeMarshalling(completeLifecycleActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CompleteLifecycleAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CompleteLifecycleActionResult> responseHandler = new StaxResponseHandler<CompleteLifecycleActionResult>(
new CompleteLifecycleActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
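/*
 * Usage sketch (illustrative, not part of the generated source): completing a launch lifecycle hook
 * for an instance with a CONTINUE result. Assumes autoScaling is a client built via
 * AmazonAutoScalingClientBuilder; the hook, group, and instance identifiers are example values.
 *
 *   autoScaling.completeLifecycleAction(new CompleteLifecycleActionRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withLifecycleHookName("my-launch-hook")
 *           .withInstanceId("i-0123456789abcdef0")
 *           .withLifecycleActionResult("CONTINUE"));
 */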
/**
*
* We strongly recommend using a launch template when calling this operation to ensure full functionality for
* Amazon EC2 Auto Scaling and Amazon EC2.
*
*
* Creates an Auto Scaling group with the specified name and attributes.
*
*
* If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the
* DescribeAccountLimits API. For information about updating this limit, see Quotas for Amazon EC2
* Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you're new to Amazon EC2 Auto Scaling, see the introductory tutorials in Get started
* with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* Every Auto Scaling group has three size properties (DesiredCapacity, MaxSize, and MinSize). Usually, you set
* these sizes based on a specific number of instances. However, if you configure a mixed instances policy that
* defines weights for the instance types, you must specify these sizes with the same units that you use for
* weighting instances.
*
*
* @param createAutoScalingGroupRequest
* @return Result of the CreateAutoScalingGroup operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.CreateAutoScalingGroup
* @see AWS API Documentation
*/
@Override
public CreateAutoScalingGroupResult createAutoScalingGroup(CreateAutoScalingGroupRequest request) {
request = beforeClientExecution(request);
return executeCreateAutoScalingGroup(request);
}
@SdkInternalApi
final CreateAutoScalingGroupResult executeCreateAutoScalingGroup(CreateAutoScalingGroupRequest createAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(createAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateAutoScalingGroupRequest> request = null;
Response<CreateAutoScalingGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAutoScalingGroupRequestMarshaller().marshall(super.beforeMarshalling(createAutoScalingGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAutoScalingGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CreateAutoScalingGroupResult> responseHandler = new StaxResponseHandler<CreateAutoScalingGroupResult>(
new CreateAutoScalingGroupResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
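/*
 * Usage sketch (illustrative, not part of the generated source): creating a group from a launch
 * template, as the documentation above recommends. Assumes autoScaling is a client built via
 * AmazonAutoScalingClientBuilder; the template name, subnet IDs, and sizes are example values.
 *
 *   autoScaling.createAutoScalingGroup(new CreateAutoScalingGroupRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withLaunchTemplate(new LaunchTemplateSpecification()
 *                   .withLaunchTemplateName("my-template")
 *                   .withVersion("$Default"))
 *           .withMinSize(1)
 *           .withMaxSize(5)
 *           .withDesiredCapacity(2)
 *           .withVPCZoneIdentifier("subnet-0123456789abcdef0,subnet-0fedcba9876543210"));
 */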
/**
*
* Creates a launch configuration.
*
*
* If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the
* DescribeAccountLimits API. For information about updating this limit, see Quotas for Amazon EC2
* Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* For more information, see Launch configurations
* in the Amazon EC2 Auto Scaling User Guide.
*
*
*
* Amazon EC2 Auto Scaling configures instances launched as part of an Auto Scaling group using either a launch
* template or a launch configuration. We strongly recommend that you do not use launch configurations. They do not
* provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. For information about using launch
* templates, see Launch
* templates in the Amazon EC2 Auto Scaling User Guide.
*
*
*
* @param createLaunchConfigurationRequest
* @return Result of the CreateLaunchConfiguration operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.CreateLaunchConfiguration
* @see AWS API Documentation
*/
@Override
public CreateLaunchConfigurationResult createLaunchConfiguration(CreateLaunchConfigurationRequest request) {
request = beforeClientExecution(request);
return executeCreateLaunchConfiguration(request);
}
@SdkInternalApi
final CreateLaunchConfigurationResult executeCreateLaunchConfiguration(CreateLaunchConfigurationRequest createLaunchConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(createLaunchConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateLaunchConfigurationRequest> request = null;
Response<CreateLaunchConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateLaunchConfigurationRequestMarshaller().marshall(super.beforeMarshalling(createLaunchConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateLaunchConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CreateLaunchConfigurationResult> responseHandler = new StaxResponseHandler<CreateLaunchConfigurationResult>(
new CreateLaunchConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
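/*
 * Usage sketch (illustrative, not part of the generated source): creating a minimal launch
 * configuration. Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the AMI
 * ID and instance type are example values (launch templates are recommended instead, per the note above).
 *
 *   autoScaling.createLaunchConfiguration(new CreateLaunchConfigurationRequest()
 *           .withLaunchConfigurationName("my-launch-config")
 *           .withImageId("ami-0123456789abcdef0")
 *           .withInstanceType("t3.micro"));
 */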
/**
*
* Creates or updates tags for the specified Auto Scaling group.
*
*
* When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and
* you do not get an error message.
*
*
* For more information, see Tag Auto Scaling
* groups and instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param createOrUpdateTagsRequest
* @return Result of the CreateOrUpdateTags operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @sample AmazonAutoScaling.CreateOrUpdateTags
* @see AWS
* API Documentation
*/
@Override
public CreateOrUpdateTagsResult createOrUpdateTags(CreateOrUpdateTagsRequest request) {
request = beforeClientExecution(request);
return executeCreateOrUpdateTags(request);
}
@SdkInternalApi
final CreateOrUpdateTagsResult executeCreateOrUpdateTags(CreateOrUpdateTagsRequest createOrUpdateTagsRequest) {
ExecutionContext executionContext = createExecutionContext(createOrUpdateTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateOrUpdateTagsRequest> request = null;
Response<CreateOrUpdateTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateOrUpdateTagsRequestMarshaller().marshall(super.beforeMarshalling(createOrUpdateTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateOrUpdateTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CreateOrUpdateTagsResult> responseHandler = new StaxResponseHandler<CreateOrUpdateTagsResult>(
new CreateOrUpdateTagsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
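/*
 * Usage sketch (illustrative, not part of the generated source): tagging an Auto Scaling group.
 * Assumes autoScaling is a client built via AmazonAutoScalingClientBuilder; the key, value, and
 * group name are example values.
 *
 *   autoScaling.createOrUpdateTags(new CreateOrUpdateTagsRequest()
 *           .withTags(new Tag()
 *                   .withResourceId("my-asg")
 *                   .withResourceType("auto-scaling-group")
 *                   .withKey("environment")
 *                   .withValue("test")
 *                   .withPropagateAtLaunch(true)));
 */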
/**
*
* Deletes the specified Auto Scaling group.
*
*
* If the group has instances or scaling activities in progress, you must specify the option to force the deletion
* in order for it to succeed. The force delete operation will also terminate the EC2 instances. If the group has a
* warm pool, the force delete option also deletes the warm pool.
*
*
* To remove instances from the Auto Scaling group before deleting it, call the DetachInstances API with the
* list of instances and the option to decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling
* does not launch replacement instances.
*
*
* To terminate all instances before deleting the Auto Scaling group, call the UpdateAutoScalingGroup API and
* set the minimum size and desired capacity of the Auto Scaling group to zero.
*
*
* If the group has scaling policies, deleting the group deletes the policies, the underlying alarm actions, and any
* alarm that no longer has an associated action.
*
*
* For more information, see Delete your Auto Scaling
* infrastructure in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param deleteAutoScalingGroupRequest
* @return Result of the DeleteAutoScalingGroup operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteAutoScalingGroup
* @see AWS API Documentation
*/
@Override
public DeleteAutoScalingGroupResult deleteAutoScalingGroup(DeleteAutoScalingGroupRequest request) {
request = beforeClientExecution(request);
return executeDeleteAutoScalingGroup(request);
}
@SdkInternalApi
final DeleteAutoScalingGroupResult executeDeleteAutoScalingGroup(DeleteAutoScalingGroupRequest deleteAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteAutoScalingGroupRequest> request = null;
Response<DeleteAutoScalingGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteAutoScalingGroupRequestMarshaller().marshall(super.beforeMarshalling(deleteAutoScalingGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteAutoScalingGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteAutoScalingGroupResult> responseHandler = new StaxResponseHandler<DeleteAutoScalingGroupResult>(
new DeleteAutoScalingGroupResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
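/*
 * Usage sketch (illustrative, not part of the generated source): force-deleting a group, which also
 * terminates its instances as described above. Assumes autoScaling is a client built via
 * AmazonAutoScalingClientBuilder; the group name is an example value.
 *
 *   autoScaling.deleteAutoScalingGroup(new DeleteAutoScalingGroupRequest()
 *           .withAutoScalingGroupName("my-asg")
 *           .withForceDelete(true));
 */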
/**
*
* Deletes the specified launch configuration.
*
*
* The launch configuration must not be attached to an Auto Scaling group. When this call completes, the launch
* configuration is no longer available for use.
*
*
* @param deleteLaunchConfigurationRequest
* @return Result of the DeleteLaunchConfiguration operation returned by the service.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteLaunchConfiguration
* @see AWS API Documentation
*/
@Override
public DeleteLaunchConfigurationResult deleteLaunchConfiguration(DeleteLaunchConfigurationRequest request) {
request = beforeClientExecution(request);
return executeDeleteLaunchConfiguration(request);
}
@SdkInternalApi
final DeleteLaunchConfigurationResult executeDeleteLaunchConfiguration(DeleteLaunchConfigurationRequest deleteLaunchConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteLaunchConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteLaunchConfigurationRequest> request = null;
Response<DeleteLaunchConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteLaunchConfigurationRequestMarshaller().marshall(super.beforeMarshalling(deleteLaunchConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteLaunchConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteLaunchConfigurationResult> responseHandler = new StaxResponseHandler<DeleteLaunchConfigurationResult>(
new DeleteLaunchConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
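/*
 * Usage sketch (illustrative, not part of the generated source): deleting a launch configuration
 * that is no longer attached to a group. Assumes autoScaling is a client built via
 * AmazonAutoScalingClientBuilder; the name is an example value.
 *
 *   autoScaling.deleteLaunchConfiguration(new DeleteLaunchConfigurationRequest()
 *           .withLaunchConfigurationName("my-launch-config"));
 */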
/**
*
* Deletes the specified lifecycle hook.
*
*
* If there are any outstanding lifecycle actions, they are completed first (ABANDON for launching
* instances, CONTINUE for terminating instances).
*
*
* @param deleteLifecycleHookRequest
* @return Result of the DeleteLifecycleHook operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteLifecycleHook
* @see AWS API Documentation
*/
@Override
public DeleteLifecycleHookResult deleteLifecycleHook(DeleteLifecycleHookRequest request) {
request = beforeClientExecution(request);
return executeDeleteLifecycleHook(request);
}
@SdkInternalApi
final DeleteLifecycleHookResult executeDeleteLifecycleHook(DeleteLifecycleHookRequest deleteLifecycleHookRequest) {
ExecutionContext executionContext = createExecutionContext(deleteLifecycleHookRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteLifecycleHookRequest> request = null;
Response<DeleteLifecycleHookResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteLifecycleHookRequestMarshaller().marshall(super.beforeMarshalling(deleteLifecycleHookRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteLifecycleHook");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteLifecycleHookResult> responseHandler = new StaxResponseHandler<DeleteLifecycleHookResult>(
new DeleteLifecycleHookResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified notification.
*
*
* @param deleteNotificationConfigurationRequest
* @return Result of the DeleteNotificationConfiguration operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteNotificationConfiguration
* @see AWS API Documentation
*/
@Override
public DeleteNotificationConfigurationResult deleteNotificationConfiguration(DeleteNotificationConfigurationRequest request) {
request = beforeClientExecution(request);
return executeDeleteNotificationConfiguration(request);
}
@SdkInternalApi
final DeleteNotificationConfigurationResult executeDeleteNotificationConfiguration(
DeleteNotificationConfigurationRequest deleteNotificationConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteNotificationConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteNotificationConfigurationRequest> request = null;
Response<DeleteNotificationConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteNotificationConfigurationRequestMarshaller().marshall(super.beforeMarshalling(deleteNotificationConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteNotificationConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteNotificationConfigurationResult> responseHandler = new StaxResponseHandler<DeleteNotificationConfigurationResult>(
new DeleteNotificationConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified scaling policy.
*
*
* Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does
* not delete the alarm, even if it no longer has an associated action.
*
*
* For more information, see Delete a scaling
* policy in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param deletePolicyRequest
* @return Result of the DeletePolicy operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.DeletePolicy
* @see AWS API
* Documentation
*/
@Override
public DeletePolicyResult deletePolicy(DeletePolicyRequest request) {
request = beforeClientExecution(request);
return executeDeletePolicy(request);
}
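/*
 * Illustrative usage sketch (not part of the generated client; the group and policy names below are placeholders):
 * deleting a scaling policy. As noted above, the underlying CloudWatch alarm is not deleted and must be removed
 * separately through the CloudWatch API if it is no longer needed.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   autoScaling.deletePolicy(new DeletePolicyRequest()
 *           .withAutoScalingGroupName("my-asg")          // placeholder Auto Scaling group name
 *           .withPolicyName("my-scale-out-policy"));     // placeholder policy name
 */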
@SdkInternalApi
final DeletePolicyResult executeDeletePolicy(DeletePolicyRequest deletePolicyRequest) {
ExecutionContext executionContext = createExecutionContext(deletePolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeletePolicyRequest> request = null;
Response<DeletePolicyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeletePolicyRequestMarshaller().marshall(super.beforeMarshalling(deletePolicyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeletePolicy");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeletePolicyResult> responseHandler = new StaxResponseHandler<DeletePolicyResult>(new DeletePolicyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified scheduled action.
*
*
* @param deleteScheduledActionRequest
* @return Result of the DeleteScheduledAction operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteScheduledAction
* @see AWS API Documentation
*/
@Override
public DeleteScheduledActionResult deleteScheduledAction(DeleteScheduledActionRequest request) {
request = beforeClientExecution(request);
return executeDeleteScheduledAction(request);
}
@SdkInternalApi
final DeleteScheduledActionResult executeDeleteScheduledAction(DeleteScheduledActionRequest deleteScheduledActionRequest) {
ExecutionContext executionContext = createExecutionContext(deleteScheduledActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteScheduledActionRequest> request = null;
Response<DeleteScheduledActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteScheduledActionRequestMarshaller().marshall(super.beforeMarshalling(deleteScheduledActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteScheduledAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteScheduledActionResult> responseHandler = new StaxResponseHandler<DeleteScheduledActionResult>(
new DeleteScheduledActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified tags.
*
*
* @param deleteTagsRequest
* @return Result of the DeleteTags operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @sample AmazonAutoScaling.DeleteTags
* @see AWS API
* Documentation
*/
@Override
public DeleteTagsResult deleteTags(DeleteTagsRequest request) {
request = beforeClientExecution(request);
return executeDeleteTags(request);
}
@SdkInternalApi
final DeleteTagsResult executeDeleteTags(DeleteTagsRequest deleteTagsRequest) {
ExecutionContext executionContext = createExecutionContext(deleteTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteTagsRequest> request = null;
Response<DeleteTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteTagsRequestMarshaller().marshall(super.beforeMarshalling(deleteTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteTagsResult> responseHandler = new StaxResponseHandler<DeleteTagsResult>(new DeleteTagsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the warm pool for the specified Auto Scaling group.
*
*
* For more information, see Warm pools for
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param deleteWarmPoolRequest
* @return Result of the DeleteWarmPool operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @sample AmazonAutoScaling.DeleteWarmPool
* @see AWS API
* Documentation
*/
@Override
public DeleteWarmPoolResult deleteWarmPool(DeleteWarmPoolRequest request) {
request = beforeClientExecution(request);
return executeDeleteWarmPool(request);
}
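/*
 * Illustrative usage sketch (not part of the generated client; the group name is a placeholder): deleting a warm
 * pool. Setting ForceDelete requests deletion even if the warm pool still contains instances.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   autoScaling.deleteWarmPool(new DeleteWarmPoolRequest()
 *           .withAutoScalingGroupName("my-asg")   // placeholder group name
 *           .withForceDelete(true));              // delete without waiting for the warm pool to drain
 */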
@SdkInternalApi
final DeleteWarmPoolResult executeDeleteWarmPool(DeleteWarmPoolRequest deleteWarmPoolRequest) {
ExecutionContext executionContext = createExecutionContext(deleteWarmPoolRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteWarmPoolRequest> request = null;
Response<DeleteWarmPoolResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteWarmPoolRequestMarshaller().marshall(super.beforeMarshalling(deleteWarmPoolRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteWarmPool");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteWarmPoolResult> responseHandler = new StaxResponseHandler<DeleteWarmPoolResult>(
new DeleteWarmPoolResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the current Amazon EC2 Auto Scaling resource quotas for your account.
*
*
* When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of Auto
* Scaling groups and launch configurations that you can create in a given Region. For more information, see Quotas for Amazon EC2
* Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeAccountLimitsRequest
* @return Result of the DescribeAccountLimits operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAccountLimits
* @see AWS API Documentation
*/
@Override
public DescribeAccountLimitsResult describeAccountLimits(DescribeAccountLimitsRequest request) {
request = beforeClientExecution(request);
return executeDescribeAccountLimits(request);
}
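/*
 * Illustrative usage sketch (not part of the generated client): comparing current usage against the account
 * quotas returned by this operation.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribeAccountLimitsResult limits = autoScaling.describeAccountLimits();
 *   System.out.printf("Auto Scaling groups: %d of %d%n",
 *           limits.getNumberOfAutoScalingGroups(), limits.getMaxNumberOfAutoScalingGroups());
 *   System.out.printf("Launch configurations: %d of %d%n",
 *           limits.getNumberOfLaunchConfigurations(), limits.getMaxNumberOfLaunchConfigurations());
 */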
@SdkInternalApi
final DescribeAccountLimitsResult executeDescribeAccountLimits(DescribeAccountLimitsRequest describeAccountLimitsRequest) {
ExecutionContext executionContext = createExecutionContext(describeAccountLimitsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAccountLimitsRequest> request = null;
Response<DescribeAccountLimitsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAccountLimitsRequestMarshaller().marshall(super.beforeMarshalling(describeAccountLimitsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAccountLimits");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAccountLimitsResult> responseHandler = new StaxResponseHandler<DescribeAccountLimitsResult>(
new DescribeAccountLimitsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAccountLimitsResult describeAccountLimits() {
return describeAccountLimits(new DescribeAccountLimitsRequest());
}
/**
*
* Describes the available adjustment types for step scaling and simple scaling policies.
*
*
* The following adjustment types are supported:
*
* - <code>ChangeInCapacity</code>
* - <code>ExactCapacity</code>
* - <code>PercentChangeInCapacity</code>
*
* @param describeAdjustmentTypesRequest
* @return Result of the DescribeAdjustmentTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAdjustmentTypes
* @see AWS API Documentation
*/
@Override
public DescribeAdjustmentTypesResult describeAdjustmentTypes(DescribeAdjustmentTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeAdjustmentTypes(request);
}
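/*
 * Illustrative usage sketch (not part of the generated client): listing the adjustment types available for step
 * and simple scaling policies.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   for (AdjustmentType type : autoScaling.describeAdjustmentTypes().getAdjustmentTypes()) {
 *       System.out.println(type.getAdjustmentType());   // e.g. ChangeInCapacity, ExactCapacity, PercentChangeInCapacity
 *   }
 */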
@SdkInternalApi
final DescribeAdjustmentTypesResult executeDescribeAdjustmentTypes(DescribeAdjustmentTypesRequest describeAdjustmentTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAdjustmentTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAdjustmentTypesRequest> request = null;
Response<DescribeAdjustmentTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAdjustmentTypesRequestMarshaller().marshall(super.beforeMarshalling(describeAdjustmentTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAdjustmentTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAdjustmentTypesResult> responseHandler = new StaxResponseHandler<DescribeAdjustmentTypesResult>(
new DescribeAdjustmentTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAdjustmentTypesResult describeAdjustmentTypes() {
return describeAdjustmentTypes(new DescribeAdjustmentTypesRequest());
}
/**
*
* Gets information about the Auto Scaling groups in the account and Region.
*
*
* If you specify Auto Scaling group names, the output includes information for only the specified Auto Scaling
* groups. If you specify filters, the output includes information for only those Auto Scaling groups that meet the
* filter criteria. If you do not specify group names or filters, the output includes information for all Auto
* Scaling groups.
*
*
* This operation also returns information about instances in Auto Scaling groups. To retrieve information about the
* instances in a warm pool, you must call the DescribeWarmPool API.
*
*
* @param describeAutoScalingGroupsRequest
* @return Result of the DescribeAutoScalingGroups operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAutoScalingGroups
* @see AWS API Documentation
*/
@Override
public DescribeAutoScalingGroupsResult describeAutoScalingGroups(DescribeAutoScalingGroupsRequest request) {
request = beforeClientExecution(request);
return executeDescribeAutoScalingGroups(request);
}
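/*
 * Illustrative usage sketch (not part of the generated client): paging through all Auto Scaling groups in the
 * account and Region by following NextToken until it is null.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribeAutoScalingGroupsRequest page = new DescribeAutoScalingGroupsRequest();
 *   do {
 *       DescribeAutoScalingGroupsResult result = autoScaling.describeAutoScalingGroups(page);
 *       for (AutoScalingGroup group : result.getAutoScalingGroups()) {
 *           System.out.println(group.getAutoScalingGroupName() + ": desired=" + group.getDesiredCapacity());
 *       }
 *       page.setNextToken(result.getNextToken());
 *   } while (page.getNextToken() != null);
 */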
@SdkInternalApi
final DescribeAutoScalingGroupsResult executeDescribeAutoScalingGroups(DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAutoScalingGroupsRequest> request = null;
Response<DescribeAutoScalingGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAutoScalingGroupsRequestMarshaller().marshall(super.beforeMarshalling(describeAutoScalingGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAutoScalingGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAutoScalingGroupsResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingGroupsResult>(
new DescribeAutoScalingGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAutoScalingGroupsResult describeAutoScalingGroups() {
return describeAutoScalingGroups(new DescribeAutoScalingGroupsRequest());
}
/**
*
* Gets information about the Auto Scaling instances in the account and Region.
*
*
* @param describeAutoScalingInstancesRequest
* @return Result of the DescribeAutoScalingInstances operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAutoScalingInstances
* @see AWS API Documentation
*/
@Override
public DescribeAutoScalingInstancesResult describeAutoScalingInstances(DescribeAutoScalingInstancesRequest request) {
request = beforeClientExecution(request);
return executeDescribeAutoScalingInstances(request);
}
@SdkInternalApi
final DescribeAutoScalingInstancesResult executeDescribeAutoScalingInstances(DescribeAutoScalingInstancesRequest describeAutoScalingInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAutoScalingInstancesRequest> request = null;
Response<DescribeAutoScalingInstancesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAutoScalingInstancesRequestMarshaller().marshall(super.beforeMarshalling(describeAutoScalingInstancesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAutoScalingInstances");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAutoScalingInstancesResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingInstancesResult>(
new DescribeAutoScalingInstancesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAutoScalingInstancesResult describeAutoScalingInstances() {
return describeAutoScalingInstances(new DescribeAutoScalingInstancesRequest());
}
/**
*
* Describes the notification types that are supported by Amazon EC2 Auto Scaling.
*
*
* @param describeAutoScalingNotificationTypesRequest
* @return Result of the DescribeAutoScalingNotificationTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAutoScalingNotificationTypes
* @see AWS API Documentation
*/
@Override
public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes(DescribeAutoScalingNotificationTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeAutoScalingNotificationTypes(request);
}
@SdkInternalApi
final DescribeAutoScalingNotificationTypesResult executeDescribeAutoScalingNotificationTypes(
DescribeAutoScalingNotificationTypesRequest describeAutoScalingNotificationTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingNotificationTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAutoScalingNotificationTypesRequest> request = null;
Response<DescribeAutoScalingNotificationTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAutoScalingNotificationTypesRequestMarshaller().marshall(super
.beforeMarshalling(describeAutoScalingNotificationTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAutoScalingNotificationTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAutoScalingNotificationTypesResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingNotificationTypesResult>(
new DescribeAutoScalingNotificationTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes() {
return describeAutoScalingNotificationTypes(new DescribeAutoScalingNotificationTypesRequest());
}
/**
*
* Gets information about the instance refreshes for the specified Auto Scaling group from the previous six weeks.
*
*
* This operation is part of the instance refresh
* feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you
* make configuration changes.
*
*
* To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the
* instance refreshes you previously initiated, including their status, start time, end time, the percentage of the
* instance refresh that is complete, and the number of instances remaining to update before the instance refresh is
* complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also
* returns information about the rollback of the instance refresh.
*
*
* @param describeInstanceRefreshesRequest
* @return Result of the DescribeInstanceRefreshes operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeInstanceRefreshes
* @see AWS API Documentation
*/
@Override
public DescribeInstanceRefreshesResult describeInstanceRefreshes(DescribeInstanceRefreshesRequest request) {
request = beforeClientExecution(request);
return executeDescribeInstanceRefreshes(request);
}
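/*
 * Illustrative usage sketch (not part of the generated client; the group name is a placeholder): checking the
 * status and progress of recent instance refreshes for a group.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribeInstanceRefreshesResult result = autoScaling.describeInstanceRefreshes(
 *           new DescribeInstanceRefreshesRequest().withAutoScalingGroupName("my-asg"));
 *   for (InstanceRefresh refresh : result.getInstanceRefreshes()) {
 *       System.out.println(refresh.getInstanceRefreshId() + " " + refresh.getStatus()
 *               + " " + refresh.getPercentageComplete() + "% complete");
 *   }
 */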
@SdkInternalApi
final DescribeInstanceRefreshesResult executeDescribeInstanceRefreshes(DescribeInstanceRefreshesRequest describeInstanceRefreshesRequest) {
ExecutionContext executionContext = createExecutionContext(describeInstanceRefreshesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeInstanceRefreshesRequest> request = null;
Response<DescribeInstanceRefreshesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeInstanceRefreshesRequestMarshaller().marshall(super.beforeMarshalling(describeInstanceRefreshesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeInstanceRefreshes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeInstanceRefreshesResult> responseHandler = new StaxResponseHandler<DescribeInstanceRefreshesResult>(
new DescribeInstanceRefreshesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Gets information about the launch configurations in the account and Region.
*
*
* @param describeLaunchConfigurationsRequest
* @return Result of the DescribeLaunchConfigurations operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLaunchConfigurations
* @see AWS API Documentation
*/
@Override
public DescribeLaunchConfigurationsResult describeLaunchConfigurations(DescribeLaunchConfigurationsRequest request) {
request = beforeClientExecution(request);
return executeDescribeLaunchConfigurations(request);
}
@SdkInternalApi
final DescribeLaunchConfigurationsResult executeDescribeLaunchConfigurations(DescribeLaunchConfigurationsRequest describeLaunchConfigurationsRequest) {
ExecutionContext executionContext = createExecutionContext(describeLaunchConfigurationsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLaunchConfigurationsRequest> request = null;
Response<DescribeLaunchConfigurationsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLaunchConfigurationsRequestMarshaller().marshall(super.beforeMarshalling(describeLaunchConfigurationsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLaunchConfigurations");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLaunchConfigurationsResult> responseHandler = new StaxResponseHandler<DescribeLaunchConfigurationsResult>(
new DescribeLaunchConfigurationsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeLaunchConfigurationsResult describeLaunchConfigurations() {
return describeLaunchConfigurations(new DescribeLaunchConfigurationsRequest());
}
/**
*
* Describes the available types of lifecycle hooks.
*
*
* The following hook types are supported:
*
* - <code>autoscaling:EC2_INSTANCE_LAUNCHING</code>
* - <code>autoscaling:EC2_INSTANCE_TERMINATING</code>
*
* @param describeLifecycleHookTypesRequest
* @return Result of the DescribeLifecycleHookTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLifecycleHookTypes
* @see AWS API Documentation
*/
@Override
public DescribeLifecycleHookTypesResult describeLifecycleHookTypes(DescribeLifecycleHookTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeLifecycleHookTypes(request);
}
@SdkInternalApi
final DescribeLifecycleHookTypesResult executeDescribeLifecycleHookTypes(DescribeLifecycleHookTypesRequest describeLifecycleHookTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeLifecycleHookTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLifecycleHookTypesRequest> request = null;
Response<DescribeLifecycleHookTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLifecycleHookTypesRequestMarshaller().marshall(super.beforeMarshalling(describeLifecycleHookTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLifecycleHookTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLifecycleHookTypesResult> responseHandler = new StaxResponseHandler<DescribeLifecycleHookTypesResult>(
new DescribeLifecycleHookTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeLifecycleHookTypesResult describeLifecycleHookTypes() {
return describeLifecycleHookTypes(new DescribeLifecycleHookTypesRequest());
}
/**
*
* Gets information about the lifecycle hooks for the specified Auto Scaling group.
*
*
* @param describeLifecycleHooksRequest
* @return Result of the DescribeLifecycleHooks operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLifecycleHooks
* @see AWS API Documentation
*/
@Override
public DescribeLifecycleHooksResult describeLifecycleHooks(DescribeLifecycleHooksRequest request) {
request = beforeClientExecution(request);
return executeDescribeLifecycleHooks(request);
}
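/*
 * Illustrative usage sketch (not part of the generated client; the group name is a placeholder): listing the
 * lifecycle hooks configured for a group.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribeLifecycleHooksResult result = autoScaling.describeLifecycleHooks(
 *           new DescribeLifecycleHooksRequest().withAutoScalingGroupName("my-asg"));
 *   for (LifecycleHook hook : result.getLifecycleHooks()) {
 *       System.out.println(hook.getLifecycleHookName() + " (" + hook.getLifecycleTransition() + ")");
 *   }
 */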
@SdkInternalApi
final DescribeLifecycleHooksResult executeDescribeLifecycleHooks(DescribeLifecycleHooksRequest describeLifecycleHooksRequest) {
ExecutionContext executionContext = createExecutionContext(describeLifecycleHooksRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLifecycleHooksRequest> request = null;
Response<DescribeLifecycleHooksResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLifecycleHooksRequestMarshaller().marshall(super.beforeMarshalling(describeLifecycleHooksRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLifecycleHooks");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLifecycleHooksResult> responseHandler = new StaxResponseHandler<DescribeLifecycleHooksResult>(
new DescribeLifecycleHooksResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
*
* This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic source
* types. We recommend using <code>DescribeTrafficSources</code> to simplify how you manage traffic sources. However,
* we continue to support <code>DescribeLoadBalancerTargetGroups</code>. You can use both the original
* <code>DescribeLoadBalancerTargetGroups</code> API operation and <code>DescribeTrafficSources</code> on the same
* Auto Scaling group.
*
*
*
* Gets information about the Elastic Load Balancing target groups for the specified Auto Scaling group.
*
*
* To determine the attachment status of the target group, use the <code>State</code> element in the response. When
* you attach a target group to an Auto Scaling group, the initial <code>State</code> value is <code>Adding</code>.
* The state transitions to <code>Added</code> after all Auto Scaling instances are registered with the target
* group. If Elastic Load Balancing health checks are enabled for the Auto Scaling group, the state transitions to
* <code>InService</code> after at least one Auto Scaling instance passes the health check. When the target group is
* in the <code>InService</code> state, Amazon EC2 Auto Scaling can terminate and replace any instances that are
* reported as unhealthy. If no registered instances pass the health checks, the target group doesn't enter the
* <code>InService</code> state.
*
*
* Target groups also have an <code>InService</code> state if you attach them in the CreateAutoScalingGroup
* API call. If your target group state is <code>InService</code>, but it is not working properly, check the scaling
* activities by calling DescribeScalingActivities and take any corrective actions necessary.
*
*
* For help with failed health checks, see Troubleshooting Amazon EC2
* Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Use Elastic Load
* Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto
* Scaling User Guide.
*
*
*
* You can use this operation to describe target groups that were attached by using
* AttachLoadBalancerTargetGroups, but not for target groups that were attached by using
* AttachTrafficSources.
*
*
*
* @param describeLoadBalancerTargetGroupsRequest
* @return Result of the DescribeLoadBalancerTargetGroups operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @sample AmazonAutoScaling.DescribeLoadBalancerTargetGroups
* @see AWS API Documentation
*/
@Override
public DescribeLoadBalancerTargetGroupsResult describeLoadBalancerTargetGroups(DescribeLoadBalancerTargetGroupsRequest request) {
request = beforeClientExecution(request);
return executeDescribeLoadBalancerTargetGroups(request);
}
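/*
 * Illustrative usage sketch (not part of the generated client; the group name is a placeholder): checking the
 * attachment state of each target group, following the Adding/Added/InService transitions described above.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribeLoadBalancerTargetGroupsResult result = autoScaling.describeLoadBalancerTargetGroups(
 *           new DescribeLoadBalancerTargetGroupsRequest().withAutoScalingGroupName("my-asg"));
 *   for (LoadBalancerTargetGroupState state : result.getLoadBalancerTargetGroups()) {
 *       System.out.println(state.getLoadBalancerTargetGroupARN() + " -> " + state.getState());
 *   }
 */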
@SdkInternalApi
final DescribeLoadBalancerTargetGroupsResult executeDescribeLoadBalancerTargetGroups(
DescribeLoadBalancerTargetGroupsRequest describeLoadBalancerTargetGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(describeLoadBalancerTargetGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLoadBalancerTargetGroupsRequest> request = null;
Response<DescribeLoadBalancerTargetGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLoadBalancerTargetGroupsRequestMarshaller().marshall(super.beforeMarshalling(describeLoadBalancerTargetGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLoadBalancerTargetGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLoadBalancerTargetGroupsResult> responseHandler = new StaxResponseHandler<DescribeLoadBalancerTargetGroupsResult>(
new DescribeLoadBalancerTargetGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
*
* This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic source
* types. We recommend using <code>DescribeTrafficSources</code> to simplify how you manage traffic sources.
* However, we continue to support <code>DescribeLoadBalancers</code>. You can use both the original
* <code>DescribeLoadBalancers</code> API operation and <code>DescribeTrafficSources</code> on the same Auto Scaling
* group.
*
*
*
* Gets information about the load balancers for the specified Auto Scaling group.
*
*
* This operation describes only Classic Load Balancers. If you have Application Load Balancers, Network Load
* Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.
*
*
* To determine the attachment status of the load balancer, use the <code>State</code> element in the response. When
* you attach a load balancer to an Auto Scaling group, the initial <code>State</code> value is <code>Adding</code>.
* The state transitions to <code>Added</code> after all Auto Scaling instances are registered with the load
* balancer. If Elastic Load Balancing health checks are enabled for the Auto Scaling group, the state transitions
* to <code>InService</code> after at least one Auto Scaling instance passes the health check. When the load
* balancer is in the <code>InService</code> state, Amazon EC2 Auto Scaling can terminate and replace any instances
* that are reported as unhealthy. If no registered instances pass the health checks, the load balancer doesn't
* enter the <code>InService</code> state.
*
*
* Load balancers also have an <code>InService</code> state if you attach them in the CreateAutoScalingGroup
* API call. If your load balancer state is <code>InService</code>, but it is not working properly, check the
* scaling activities by calling DescribeScalingActivities and take any corrective actions necessary.
*
*
* For help with failed health checks, see Troubleshooting Amazon EC2
* Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Use Elastic Load
* Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto
* Scaling User Guide.
*
*
* @param describeLoadBalancersRequest
* @return Result of the DescribeLoadBalancers operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @sample AmazonAutoScaling.DescribeLoadBalancers
* @see AWS API Documentation
*/
@Override
public DescribeLoadBalancersResult describeLoadBalancers(DescribeLoadBalancersRequest request) {
request = beforeClientExecution(request);
return executeDescribeLoadBalancers(request);
}
@SdkInternalApi
final DescribeLoadBalancersResult executeDescribeLoadBalancers(DescribeLoadBalancersRequest describeLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(describeLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLoadBalancersRequest> request = null;
Response<DescribeLoadBalancersResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLoadBalancersRequestMarshaller().marshall(super.beforeMarshalling(describeLoadBalancersRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLoadBalancers");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLoadBalancersResult> responseHandler = new StaxResponseHandler<DescribeLoadBalancersResult>(
new DescribeLoadBalancersResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.
*
*
* @param describeMetricCollectionTypesRequest
* @return Result of the DescribeMetricCollectionTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeMetricCollectionTypes
* @see AWS API Documentation
*/
@Override
public DescribeMetricCollectionTypesResult describeMetricCollectionTypes(DescribeMetricCollectionTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeMetricCollectionTypes(request);
}
@SdkInternalApi
final DescribeMetricCollectionTypesResult executeDescribeMetricCollectionTypes(DescribeMetricCollectionTypesRequest describeMetricCollectionTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeMetricCollectionTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeMetricCollectionTypesRequest> request = null;
Response<DescribeMetricCollectionTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeMetricCollectionTypesRequestMarshaller().marshall(super.beforeMarshalling(describeMetricCollectionTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeMetricCollectionTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeMetricCollectionTypesResult> responseHandler = new StaxResponseHandler<DescribeMetricCollectionTypesResult>(
new DescribeMetricCollectionTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeMetricCollectionTypesResult describeMetricCollectionTypes() {
return describeMetricCollectionTypes(new DescribeMetricCollectionTypesRequest());
}
/**
*
* Gets information about the Amazon SNS notifications that are configured for one or more Auto Scaling groups.
*
*
* @param describeNotificationConfigurationsRequest
* @return Result of the DescribeNotificationConfigurations operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeNotificationConfigurations
* @see AWS API Documentation
*/
@Override
public DescribeNotificationConfigurationsResult describeNotificationConfigurations(DescribeNotificationConfigurationsRequest request) {
request = beforeClientExecution(request);
return executeDescribeNotificationConfigurations(request);
}
@SdkInternalApi
final DescribeNotificationConfigurationsResult executeDescribeNotificationConfigurations(
DescribeNotificationConfigurationsRequest describeNotificationConfigurationsRequest) {
ExecutionContext executionContext = createExecutionContext(describeNotificationConfigurationsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeNotificationConfigurationsRequest> request = null;
Response<DescribeNotificationConfigurationsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeNotificationConfigurationsRequestMarshaller()
.marshall(super.beforeMarshalling(describeNotificationConfigurationsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeNotificationConfigurations");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeNotificationConfigurationsResult> responseHandler = new StaxResponseHandler<DescribeNotificationConfigurationsResult>(
new DescribeNotificationConfigurationsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeNotificationConfigurationsResult describeNotificationConfigurations() {
return describeNotificationConfigurations(new DescribeNotificationConfigurationsRequest());
}
/**
*
* Gets information about the scaling policies in the account and Region.
*
*
* @param describePoliciesRequest
* @return Result of the DescribePolicies operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.DescribePolicies
* @see AWS
* API Documentation
*/
@Override
public DescribePoliciesResult describePolicies(DescribePoliciesRequest request) {
request = beforeClientExecution(request);
return executeDescribePolicies(request);
}
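/*
 * Illustrative usage sketch (not part of the generated client; the group name is a placeholder): listing the
 * scaling policies attached to a single group.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribePoliciesResult result = autoScaling.describePolicies(
 *           new DescribePoliciesRequest().withAutoScalingGroupName("my-asg"));
 *   for (ScalingPolicy policy : result.getScalingPolicies()) {
 *       System.out.println(policy.getPolicyName() + " (" + policy.getPolicyType() + ")");
 *   }
 */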
@SdkInternalApi
final DescribePoliciesResult executeDescribePolicies(DescribePoliciesRequest describePoliciesRequest) {
ExecutionContext executionContext = createExecutionContext(describePoliciesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribePoliciesRequest> request = null;
Response<DescribePoliciesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribePoliciesRequestMarshaller().marshall(super.beforeMarshalling(describePoliciesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribePolicies");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribePoliciesResult> responseHandler = new StaxResponseHandler<DescribePoliciesResult>(
new DescribePoliciesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribePoliciesResult describePolicies() {
return describePolicies(new DescribePoliciesRequest());
}
/**
*
* Gets information about the scaling activities in the account and Region.
*
*
* When scaling events occur, you see a record of the scaling activity in the scaling activities. For more
* information, see Verify a scaling
* activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* If the scaling event succeeds, the value of the <code>StatusCode</code> element in the response is
* <code>Successful</code>. If an attempt to launch instances failed, the <code>StatusCode</code> value is
* <code>Failed</code> or <code>Cancelled</code> and the <code>StatusMessage</code> element in the response
* indicates the cause of the failure. For help interpreting the <code>StatusMessage</code>, see Troubleshooting
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeScalingActivitiesRequest
* @return Result of the DescribeScalingActivities operation returned by the service.
* @throws InvalidNextTokenException
* The <code>NextToken</code> value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeScalingActivities
* @see AWS API Documentation
*/
@Override
public DescribeScalingActivitiesResult describeScalingActivities(DescribeScalingActivitiesRequest request) {
request = beforeClientExecution(request);
return executeDescribeScalingActivities(request);
}
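/*
 * Illustrative usage sketch (not part of the generated client; the group name is a placeholder): inspecting
 * recent scaling activities and surfacing the StatusMessage when an activity did not succeed.
 *
 *   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
 *   DescribeScalingActivitiesResult result = autoScaling.describeScalingActivities(
 *           new DescribeScalingActivitiesRequest().withAutoScalingGroupName("my-asg"));
 *   for (Activity activity : result.getActivities()) {
 *       System.out.println(activity.getDescription() + ": " + activity.getStatusCode());
 *       if (!"Successful".equals(activity.getStatusCode())) {
 *           System.out.println("  " + activity.getStatusMessage());
 *       }
 *   }
 */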
@SdkInternalApi
final DescribeScalingActivitiesResult executeDescribeScalingActivities(DescribeScalingActivitiesRequest describeScalingActivitiesRequest) {
ExecutionContext executionContext = createExecutionContext(describeScalingActivitiesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeScalingActivitiesRequest> request = null;
Response<DescribeScalingActivitiesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeScalingActivitiesRequestMarshaller().marshall(super.beforeMarshalling(describeScalingActivitiesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeScalingActivities");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeScalingActivitiesResult> responseHandler = new StaxResponseHandler<DescribeScalingActivitiesResult>(
new DescribeScalingActivitiesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeScalingActivitiesResult describeScalingActivities() {
return describeScalingActivities(new DescribeScalingActivitiesRequest());
}
/**
*
* Describes the scaling process types for use with the ResumeProcesses and SuspendProcesses APIs.
*
*
* @param describeScalingProcessTypesRequest
* @return Result of the DescribeScalingProcessTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeScalingProcessTypes
* @see AWS API Documentation
*/
@Override
public DescribeScalingProcessTypesResult describeScalingProcessTypes(DescribeScalingProcessTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeScalingProcessTypes(request);
}
@SdkInternalApi
final DescribeScalingProcessTypesResult executeDescribeScalingProcessTypes(DescribeScalingProcessTypesRequest describeScalingProcessTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeScalingProcessTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeScalingProcessTypesRequest> request = null;
Response<DescribeScalingProcessTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeScalingProcessTypesRequestMarshaller().marshall(super.beforeMarshalling(describeScalingProcessTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeScalingProcessTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeScalingProcessTypesResult> responseHandler = new StaxResponseHandler<DescribeScalingProcessTypesResult>(
                    new DescribeScalingProcessTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeScalingProcessTypesResult describeScalingProcessTypes() {
return describeScalingProcessTypes(new DescribeScalingProcessTypesRequest());
}
/**
*
* Gets information about the scheduled actions that haven't run or that have not reached their end time.
*
*
* To describe the scaling activities for scheduled actions that have already run, call the
* DescribeScalingActivities API.
*
*
* @param describeScheduledActionsRequest
* @return Result of the DescribeScheduledActions operation returned by the service.
* @throws InvalidNextTokenException
     *         The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeScheduledActions
* @see AWS API Documentation
*/
@Override
public DescribeScheduledActionsResult describeScheduledActions(DescribeScheduledActionsRequest request) {
request = beforeClientExecution(request);
return executeDescribeScheduledActions(request);
}
@SdkInternalApi
final DescribeScheduledActionsResult executeDescribeScheduledActions(DescribeScheduledActionsRequest describeScheduledActionsRequest) {
ExecutionContext executionContext = createExecutionContext(describeScheduledActionsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeScheduledActionsRequest> request = null;
        Response<DescribeScheduledActionsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeScheduledActionsRequestMarshaller().marshall(super.beforeMarshalling(describeScheduledActionsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeScheduledActions");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeScheduledActionsResult> responseHandler = new StaxResponseHandler<DescribeScheduledActionsResult>(
                    new DescribeScheduledActionsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeScheduledActionsResult describeScheduledActions() {
return describeScheduledActions(new DescribeScheduledActionsRequest());
}
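    // Usage sketch (not part of the generated client): paging through upcoming scheduled actions with NextToken.
    // The group name "my-asg" and the client built via AmazonAutoScalingClientBuilder.defaultClient() are
    // illustrative assumptions, not values defined by this class.
    //
    //     AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
    //     DescribeScheduledActionsRequest req = new DescribeScheduledActionsRequest()
    //             .withAutoScalingGroupName("my-asg").withMaxRecords(50);
    //     DescribeScheduledActionsResult page;
    //     do {
    //         page = autoScaling.describeScheduledActions(req);
    //         page.getScheduledUpdateGroupActions().forEach(a -> System.out.println(a.getScheduledActionName()));
    //         req.setNextToken(page.getNextToken());
    //     } while (page.getNextToken() != null);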
/**
*
* Describes the specified tags.
*
*
* You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling
* group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it
* to be included in the results.
*
*
* You can also specify multiple filters. The result includes information for a particular tag only if it matches
* all the filters. If there's no match, no special message is returned.
*
*
* For more information, see Tag Auto Scaling
* groups and instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeTagsRequest
* @return Result of the DescribeTags operation returned by the service.
* @throws InvalidNextTokenException
     *         The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeTags
* @see AWS API
* Documentation
*/
@Override
public DescribeTagsResult describeTags(DescribeTagsRequest request) {
request = beforeClientExecution(request);
return executeDescribeTags(request);
}
@SdkInternalApi
final DescribeTagsResult executeDescribeTags(DescribeTagsRequest describeTagsRequest) {
ExecutionContext executionContext = createExecutionContext(describeTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeTagsRequest> request = null;
        Response<DescribeTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTagsRequestMarshaller().marshall(super.beforeMarshalling(describeTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeTagsResult> responseHandler = new StaxResponseHandler<DescribeTagsResult>(new DescribeTagsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeTagsResult describeTags() {
return describeTags(new DescribeTagsRequest());
}
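    // Usage sketch (not part of the generated client): querying tags with filters. A tag is returned only if it
    // matches all filters and at least one value per filter. Assumes an AmazonAutoScaling client named
    // autoScaling built elsewhere; the group name and tag key are hypothetical placeholders.
    //
    //     DescribeTagsResult tags = autoScaling.describeTags(new DescribeTagsRequest()
    //             .withFilters(new Filter().withName("auto-scaling-group").withValues("my-asg"),
    //                          new Filter().withName("key").withValues("environment")));
    //     tags.getTags().forEach(t -> System.out.println(t.getKey() + "=" + t.getValue()));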
/**
*
* Describes the termination policies supported by Amazon EC2 Auto Scaling.
*
*
* For more information, see Configure
* termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeTerminationPolicyTypesRequest
* @return Result of the DescribeTerminationPolicyTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeTerminationPolicyTypes
* @see AWS API Documentation
*/
@Override
public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes(DescribeTerminationPolicyTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeTerminationPolicyTypes(request);
}
@SdkInternalApi
final DescribeTerminationPolicyTypesResult executeDescribeTerminationPolicyTypes(DescribeTerminationPolicyTypesRequest describeTerminationPolicyTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeTerminationPolicyTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeTerminationPolicyTypesRequest> request = null;
        Response<DescribeTerminationPolicyTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTerminationPolicyTypesRequestMarshaller().marshall(super.beforeMarshalling(describeTerminationPolicyTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTerminationPolicyTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeTerminationPolicyTypesResult> responseHandler = new StaxResponseHandler<DescribeTerminationPolicyTypesResult>(
                    new DescribeTerminationPolicyTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes() {
return describeTerminationPolicyTypes(new DescribeTerminationPolicyTypesRequest());
}
/**
*
* Gets information about the traffic sources for the specified Auto Scaling group.
*
*
* You can optionally provide a traffic source type. If you provide a traffic source type, then the results only
* include that traffic source type.
*
*
* If you do not provide a traffic source type, then the results include all the traffic sources for the specified
* Auto Scaling group.
*
*
* @param describeTrafficSourcesRequest
* @return Result of the DescribeTrafficSources operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws InvalidNextTokenException
     *         The NextToken value is not valid.
* @sample AmazonAutoScaling.DescribeTrafficSources
* @see AWS API Documentation
*/
@Override
public DescribeTrafficSourcesResult describeTrafficSources(DescribeTrafficSourcesRequest request) {
request = beforeClientExecution(request);
return executeDescribeTrafficSources(request);
}
@SdkInternalApi
final DescribeTrafficSourcesResult executeDescribeTrafficSources(DescribeTrafficSourcesRequest describeTrafficSourcesRequest) {
ExecutionContext executionContext = createExecutionContext(describeTrafficSourcesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeTrafficSourcesRequest> request = null;
        Response<DescribeTrafficSourcesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTrafficSourcesRequestMarshaller().marshall(super.beforeMarshalling(describeTrafficSourcesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTrafficSources");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeTrafficSourcesResult> responseHandler = new StaxResponseHandler<DescribeTrafficSourcesResult>(
                    new DescribeTrafficSourcesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
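    // Usage sketch (not part of the generated client): listing the traffic sources attached to a group,
    // optionally narrowed to a single type. The group name and the "elbv2" type value are illustrative
    // assumptions; autoScaling is an AmazonAutoScaling client built elsewhere.
    //
    //     DescribeTrafficSourcesResult sources = autoScaling.describeTrafficSources(
    //             new DescribeTrafficSourcesRequest().withAutoScalingGroupName("my-asg").withTrafficSourceType("elbv2"));
    //     sources.getTrafficSources().forEach(s -> System.out.println(s.getIdentifier() + " -> " + s.getState()));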
/**
*
* Gets information about a warm pool and its instances.
*
*
* For more information, see Warm pools for
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeWarmPoolRequest
* @return Result of the DescribeWarmPool operation returned by the service.
* @throws InvalidNextTokenException
     *         The NextToken value is not valid.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeWarmPool
* @see AWS
* API Documentation
*/
@Override
public DescribeWarmPoolResult describeWarmPool(DescribeWarmPoolRequest request) {
request = beforeClientExecution(request);
return executeDescribeWarmPool(request);
}
@SdkInternalApi
final DescribeWarmPoolResult executeDescribeWarmPool(DescribeWarmPoolRequest describeWarmPoolRequest) {
ExecutionContext executionContext = createExecutionContext(describeWarmPoolRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeWarmPoolRequest> request = null;
        Response<DescribeWarmPoolResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeWarmPoolRequestMarshaller().marshall(super.beforeMarshalling(describeWarmPoolRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeWarmPool");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeWarmPoolResult> responseHandler = new StaxResponseHandler<DescribeWarmPoolResult>(
                    new DescribeWarmPoolResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
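    // Usage sketch (not part of the generated client): inspecting a group's warm pool configuration and the
    // instances currently in it. The group name is a hypothetical placeholder; autoScaling is an
    // AmazonAutoScaling client built elsewhere.
    //
    //     DescribeWarmPoolResult warmPool = autoScaling.describeWarmPool(
    //             new DescribeWarmPoolRequest().withAutoScalingGroupName("my-asg"));
    //     System.out.println("Pool state: " + warmPool.getWarmPoolConfiguration().getPoolState());
    //     warmPool.getInstances().forEach(i -> System.out.println(i.getInstanceId() + " " + i.getLifecycleState()));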
/**
*
* Removes one or more instances from the specified Auto Scaling group.
*
*
     * After the instances are detached, you can manage them independently of the Auto Scaling group.
*
*
* If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to
* replace the ones that are detached.
*
*
* If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the
* load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from
* the target groups.
*
*
* For more information, see Detach
* or attach instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param detachInstancesRequest
* @return Result of the DetachInstances operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachInstances
* @see AWS
* API Documentation
*/
@Override
public DetachInstancesResult detachInstances(DetachInstancesRequest request) {
request = beforeClientExecution(request);
return executeDetachInstances(request);
}
@SdkInternalApi
final DetachInstancesResult executeDetachInstances(DetachInstancesRequest detachInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(detachInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachInstancesRequest> request = null;
        Response<DetachInstancesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachInstancesRequestMarshaller().marshall(super.beforeMarshalling(detachInstancesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachInstances");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachInstancesResult> responseHandler = new StaxResponseHandler<DetachInstancesResult>(
                    new DetachInstancesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
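    // Usage sketch (not part of the generated client): detaching an instance and decrementing the desired
    // capacity so no replacement is launched. The instance ID and group name are illustrative assumptions.
    //
    //     DetachInstancesResult detach = autoScaling.detachInstances(new DetachInstancesRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withInstanceIds("i-0123456789abcdef0")
    //             .withShouldDecrementDesiredCapacity(true));
    //     detach.getActivities().forEach(a -> System.out.println(a.getDescription()));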
/**
*
*
     * This API operation is superseded by DetachTrafficSources, which can detach multiple traffic source types. We
     * recommend using DetachTrafficSources to simplify how you manage traffic sources. However, we continue to
     * support DetachLoadBalancerTargetGroups. You can use both the original DetachLoadBalancerTargetGroups API
     * operation and DetachTrafficSources on the same Auto Scaling group.
*
*
*
* Detaches one or more target groups from the specified Auto Scaling group.
*
*
     * When you detach a target group, it enters the Removing state while deregistering the instances in the group.
     * When all instances are deregistered, you can no longer describe the target group using the
     * DescribeLoadBalancerTargetGroups API call. The instances remain running.
*
*
*
* You can use this operation to detach target groups that were attached by using
* AttachLoadBalancerTargetGroups, but not for target groups that were attached by using
* AttachTrafficSources.
*
*
*
* @param detachLoadBalancerTargetGroupsRequest
* @return Result of the DetachLoadBalancerTargetGroups operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachLoadBalancerTargetGroups
* @see AWS API Documentation
*/
@Override
public DetachLoadBalancerTargetGroupsResult detachLoadBalancerTargetGroups(DetachLoadBalancerTargetGroupsRequest request) {
request = beforeClientExecution(request);
return executeDetachLoadBalancerTargetGroups(request);
}
@SdkInternalApi
final DetachLoadBalancerTargetGroupsResult executeDetachLoadBalancerTargetGroups(DetachLoadBalancerTargetGroupsRequest detachLoadBalancerTargetGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(detachLoadBalancerTargetGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachLoadBalancerTargetGroupsRequest> request = null;
        Response<DetachLoadBalancerTargetGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachLoadBalancerTargetGroupsRequestMarshaller().marshall(super.beforeMarshalling(detachLoadBalancerTargetGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachLoadBalancerTargetGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachLoadBalancerTargetGroupsResult> responseHandler = new StaxResponseHandler<DetachLoadBalancerTargetGroupsResult>(
                    new DetachLoadBalancerTargetGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
*
     * This API operation is superseded by DetachTrafficSources, which can detach multiple traffic source types. We
     * recommend using DetachTrafficSources to simplify how you manage traffic sources. However, we continue to
     * support DetachLoadBalancers. You can use both the original DetachLoadBalancers API operation and
     * DetachTrafficSources on the same Auto Scaling group.
*
*
*
* Detaches one or more Classic Load Balancers from the specified Auto Scaling group.
*
*
* This operation detaches only Classic Load Balancers. If you have Application Load Balancers, Network Load
* Balancers, or Gateway Load Balancers, use the DetachLoadBalancerTargetGroups API instead.
*
*
     * When you detach a load balancer, it enters the Removing state while deregistering the instances in the group.
     * When all instances are deregistered, you can no longer describe the load balancer using the
     * DescribeLoadBalancers API call. The instances remain running.
*
*
* @param detachLoadBalancersRequest
* @return Result of the DetachLoadBalancers operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachLoadBalancers
* @see AWS API Documentation
*/
@Override
public DetachLoadBalancersResult detachLoadBalancers(DetachLoadBalancersRequest request) {
request = beforeClientExecution(request);
return executeDetachLoadBalancers(request);
}
@SdkInternalApi
final DetachLoadBalancersResult executeDetachLoadBalancers(DetachLoadBalancersRequest detachLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(detachLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachLoadBalancersRequest> request = null;
        Response<DetachLoadBalancersResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachLoadBalancersRequestMarshaller().marshall(super.beforeMarshalling(detachLoadBalancersRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachLoadBalancers");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachLoadBalancersResult> responseHandler = new StaxResponseHandler<DetachLoadBalancersResult>(
                    new DetachLoadBalancersResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DetachLoadBalancersResult detachLoadBalancers() {
return detachLoadBalancers(new DetachLoadBalancersRequest());
}
/**
*
* Detaches one or more traffic sources from the specified Auto Scaling group.
*
*
     * When you detach a traffic source, it enters the Removing state while deregistering the instances in the group.
     * When all instances are deregistered, you can no longer describe the traffic source using the
     * DescribeTrafficSources API call. The instances continue to run.
*
*
* @param detachTrafficSourcesRequest
* @return Result of the DetachTrafficSources operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachTrafficSources
* @see AWS API Documentation
*/
@Override
public DetachTrafficSourcesResult detachTrafficSources(DetachTrafficSourcesRequest request) {
request = beforeClientExecution(request);
return executeDetachTrafficSources(request);
}
@SdkInternalApi
final DetachTrafficSourcesResult executeDetachTrafficSources(DetachTrafficSourcesRequest detachTrafficSourcesRequest) {
ExecutionContext executionContext = createExecutionContext(detachTrafficSourcesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachTrafficSourcesRequest> request = null;
        Response<DetachTrafficSourcesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachTrafficSourcesRequestMarshaller().marshall(super.beforeMarshalling(detachTrafficSourcesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachTrafficSources");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachTrafficSourcesResult> responseHandler = new StaxResponseHandler<DetachTrafficSourcesResult>(
                    new DetachTrafficSourcesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
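    // Usage sketch (not part of the generated client): detaching a target group that is registered as a traffic
    // source. The target group ARN (region/account elided) and the "elbv2" type value are illustrative
    // assumptions; autoScaling is an AmazonAutoScaling client built elsewhere.
    //
    //     autoScaling.detachTrafficSources(new DetachTrafficSourcesRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withTrafficSources(new TrafficSourceIdentifier()
    //                     .withIdentifier("arn:aws:elasticloadbalancing:...:targetgroup/my-tg/...")
    //                     .withType("elbv2")));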
/**
*
* Disables group metrics collection for the specified Auto Scaling group.
*
*
* @param disableMetricsCollectionRequest
* @return Result of the DisableMetricsCollection operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DisableMetricsCollection
* @see AWS API Documentation
*/
@Override
public DisableMetricsCollectionResult disableMetricsCollection(DisableMetricsCollectionRequest request) {
request = beforeClientExecution(request);
return executeDisableMetricsCollection(request);
}
@SdkInternalApi
final DisableMetricsCollectionResult executeDisableMetricsCollection(DisableMetricsCollectionRequest disableMetricsCollectionRequest) {
ExecutionContext executionContext = createExecutionContext(disableMetricsCollectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DisableMetricsCollectionRequest> request = null;
        Response<DisableMetricsCollectionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DisableMetricsCollectionRequestMarshaller().marshall(super.beforeMarshalling(disableMetricsCollectionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DisableMetricsCollection");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DisableMetricsCollectionResult> responseHandler = new StaxResponseHandler<DisableMetricsCollectionResult>(
                    new DisableMetricsCollectionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Enables group metrics collection for the specified Auto Scaling group.
*
*
* You can use these metrics to track changes in an Auto Scaling group and to set alarms on threshold values. You
* can view group metrics using the Amazon EC2 Auto Scaling console or the CloudWatch console. For more information,
* see
* Monitor CloudWatch metrics for your Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User
* Guide.
*
*
* @param enableMetricsCollectionRequest
* @return Result of the EnableMetricsCollection operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.EnableMetricsCollection
* @see AWS API Documentation
*/
@Override
public EnableMetricsCollectionResult enableMetricsCollection(EnableMetricsCollectionRequest request) {
request = beforeClientExecution(request);
return executeEnableMetricsCollection(request);
}
@SdkInternalApi
final EnableMetricsCollectionResult executeEnableMetricsCollection(EnableMetricsCollectionRequest enableMetricsCollectionRequest) {
ExecutionContext executionContext = createExecutionContext(enableMetricsCollectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<EnableMetricsCollectionRequest> request = null;
        Response<EnableMetricsCollectionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new EnableMetricsCollectionRequestMarshaller().marshall(super.beforeMarshalling(enableMetricsCollectionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "EnableMetricsCollection");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<EnableMetricsCollectionResult> responseHandler = new StaxResponseHandler<EnableMetricsCollectionResult>(
                    new EnableMetricsCollectionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
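    // Usage sketch (not part of the generated client): enabling a subset of group metrics at one-minute
    // granularity ("1Minute" is the only supported granularity). The group name is a hypothetical placeholder.
    //
    //     autoScaling.enableMetricsCollection(new EnableMetricsCollectionRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withGranularity("1Minute")
    //             .withMetrics("GroupDesiredCapacity", "GroupInServiceInstances"));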
/**
*
* Moves the specified instances into the standby state.
*
*
* If you choose to decrement the desired capacity of the Auto Scaling group, the instances can enter standby as
* long as the desired capacity of the Auto Scaling group after the instances are placed into standby is equal to or
* greater than the minimum capacity of the group.
*
*
* If you choose not to decrement the desired capacity of the Auto Scaling group, the Auto Scaling group launches
* new instances to replace the instances on standby.
*
*
* For more information, see Temporarily removing
* instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param enterStandbyRequest
* @return Result of the EnterStandby operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.EnterStandby
* @see AWS API
* Documentation
*/
@Override
public EnterStandbyResult enterStandby(EnterStandbyRequest request) {
request = beforeClientExecution(request);
return executeEnterStandby(request);
}
@SdkInternalApi
final EnterStandbyResult executeEnterStandby(EnterStandbyRequest enterStandbyRequest) {
ExecutionContext executionContext = createExecutionContext(enterStandbyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<EnterStandbyRequest> request = null;
        Response<EnterStandbyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new EnterStandbyRequestMarshaller().marshall(super.beforeMarshalling(enterStandbyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "EnterStandby");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<EnterStandbyResult> responseHandler = new StaxResponseHandler<EnterStandbyResult>(new EnterStandbyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
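    // Usage sketch (not part of the generated client): putting an instance on standby without launching a
    // replacement, then returning it to service with exitStandby. Identifiers are illustrative assumptions.
    //
    //     autoScaling.enterStandby(new EnterStandbyRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withInstanceIds("i-0123456789abcdef0")
    //             .withShouldDecrementDesiredCapacity(true));
    //     // ... perform maintenance, then:
    //     autoScaling.exitStandby(new ExitStandbyRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withInstanceIds("i-0123456789abcdef0"));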
/**
*
* Executes the specified policy. This can be useful for testing the design of your scaling policy.
*
*
* @param executePolicyRequest
* @return Result of the ExecutePolicy operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.ExecutePolicy
* @see AWS API
* Documentation
*/
@Override
public ExecutePolicyResult executePolicy(ExecutePolicyRequest request) {
request = beforeClientExecution(request);
return executeExecutePolicy(request);
}
@SdkInternalApi
final ExecutePolicyResult executeExecutePolicy(ExecutePolicyRequest executePolicyRequest) {
ExecutionContext executionContext = createExecutionContext(executePolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ExecutePolicyRequest> request = null;
        Response<ExecutePolicyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExecutePolicyRequestMarshaller().marshall(super.beforeMarshalling(executePolicyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExecutePolicy");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<ExecutePolicyResult> responseHandler = new StaxResponseHandler<ExecutePolicyResult>(new ExecutePolicyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Moves the specified instances out of the standby state.
*
*
* After you put the instances back in service, the desired capacity is incremented.
*
*
* For more information, see Temporarily removing
* instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param exitStandbyRequest
* @return Result of the ExitStandby operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.ExitStandby
* @see AWS API
* Documentation
*/
@Override
public ExitStandbyResult exitStandby(ExitStandbyRequest request) {
request = beforeClientExecution(request);
return executeExitStandby(request);
}
@SdkInternalApi
final ExitStandbyResult executeExitStandby(ExitStandbyRequest exitStandbyRequest) {
ExecutionContext executionContext = createExecutionContext(exitStandbyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ExitStandbyRequest> request = null;
        Response<ExitStandbyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExitStandbyRequestMarshaller().marshall(super.beforeMarshalling(exitStandbyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExitStandby");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<ExitStandbyResult> responseHandler = new StaxResponseHandler<ExitStandbyResult>(new ExitStandbyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Retrieves the forecast data for a predictive scaling policy.
*
*
* Load forecasts are predictions of the hourly load values using historical load data from CloudWatch and an
* analysis of historical trends. Capacity forecasts are represented as predicted values for the minimum capacity
* that is needed on an hourly basis, based on the hourly load forecast.
*
*
* A minimum of 24 hours of data is required to create the initial forecasts. However, having a full 14 days of
* historical data results in more accurate forecasts.
*
*
* For more information, see Predictive
* scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param getPredictiveScalingForecastRequest
* @return Result of the GetPredictiveScalingForecast operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.GetPredictiveScalingForecast
* @see AWS API Documentation
*/
@Override
public GetPredictiveScalingForecastResult getPredictiveScalingForecast(GetPredictiveScalingForecastRequest request) {
request = beforeClientExecution(request);
return executeGetPredictiveScalingForecast(request);
}
@SdkInternalApi
final GetPredictiveScalingForecastResult executeGetPredictiveScalingForecast(GetPredictiveScalingForecastRequest getPredictiveScalingForecastRequest) {
ExecutionContext executionContext = createExecutionContext(getPredictiveScalingForecastRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<GetPredictiveScalingForecastRequest> request = null;
        Response<GetPredictiveScalingForecastResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new GetPredictiveScalingForecastRequestMarshaller().marshall(super.beforeMarshalling(getPredictiveScalingForecastRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetPredictiveScalingForecast");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<GetPredictiveScalingForecastResult> responseHandler = new StaxResponseHandler<GetPredictiveScalingForecastResult>(
                    new GetPredictiveScalingForecastResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
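    // Usage sketch (not part of the generated client): retrieving roughly the next 48 hours of forecast data for
    // an existing predictive scaling policy. The group and policy names are illustrative assumptions, and
    // java.util.Date is assumed to be imported by the calling code.
    //
    //     Date start = new Date();
    //     Date end = new Date(start.getTime() + 48L * 60 * 60 * 1000);
    //     GetPredictiveScalingForecastResult forecast = autoScaling.getPredictiveScalingForecast(
    //             new GetPredictiveScalingForecastRequest()
    //                     .withAutoScalingGroupName("my-asg")
    //                     .withPolicyName("my-predictive-policy")
    //                     .withStartTime(start)
    //                     .withEndTime(end));
    //     System.out.println("Capacity forecast points: " + forecast.getCapacityForecast().getValues().size());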
/**
*
* Creates or updates a lifecycle hook for the specified Auto Scaling group.
*
*
* Lifecycle hooks let you create solutions that are aware of events in the Auto Scaling instance lifecycle, and
* then perform a custom action on instances when the corresponding lifecycle event occurs.
*
*
* This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
*
*
* -
*
* (Optional) Create a launch template or launch configuration with a user data script that runs while an instance
* is in a wait state due to a lifecycle hook.
*
*
* -
*
* (Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when
* an instance is put into a wait state due to a lifecycle hook.
*
*
* -
*
* (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an
* Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
*
*
* -
*
* Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
*
*
* -
*
* If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state using the
* RecordLifecycleActionHeartbeat API call.
*
*
* -
*
* If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API
* call.
*
*
*
*
* For more information, see Amazon EC2 Auto Scaling
* lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call
* fails.
*
*
* You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If
* you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.
*
*
* @param putLifecycleHookRequest
* @return Result of the PutLifecycleHook operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.PutLifecycleHook
* @see AWS
* API Documentation
*/
@Override
public PutLifecycleHookResult putLifecycleHook(PutLifecycleHookRequest request) {
request = beforeClientExecution(request);
return executePutLifecycleHook(request);
}
@SdkInternalApi
final PutLifecycleHookResult executePutLifecycleHook(PutLifecycleHookRequest putLifecycleHookRequest) {
ExecutionContext executionContext = createExecutionContext(putLifecycleHookRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutLifecycleHookRequest> request = null;
        Response<PutLifecycleHookResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutLifecycleHookRequestMarshaller().marshall(super.beforeMarshalling(putLifecycleHookRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutLifecycleHook");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutLifecycleHookResult> responseHandler = new StaxResponseHandler<PutLifecycleHookResult>(
                    new PutLifecycleHookResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
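    // Usage sketch (not part of the generated client): adding a launch lifecycle hook that keeps new instances in
    // a wait state for up to five minutes and abandons the launch if no callback arrives in time. The hook and
    // group names are illustrative assumptions.
    //
    //     autoScaling.putLifecycleHook(new PutLifecycleHookRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withLifecycleHookName("bootstrap-hook")
    //             .withLifecycleTransition("autoscaling:EC2_INSTANCE_LAUNCHING")
    //             .withHeartbeatTimeout(300)
    //             .withDefaultResult("ABANDON"));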
/**
*
* Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the
* specified topic can have messages delivered to an endpoint such as a web server or an email address.
*
*
* This configuration overwrites any existing configuration.
*
*
* For more information, see Amazon SNS
* notification options for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.
*
*
* @param putNotificationConfigurationRequest
* @return Result of the PutNotificationConfiguration operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.PutNotificationConfiguration
* @see AWS API Documentation
*/
@Override
public PutNotificationConfigurationResult putNotificationConfiguration(PutNotificationConfigurationRequest request) {
request = beforeClientExecution(request);
return executePutNotificationConfiguration(request);
}
@SdkInternalApi
final PutNotificationConfigurationResult executePutNotificationConfiguration(PutNotificationConfigurationRequest putNotificationConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(putNotificationConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutNotificationConfigurationRequest> request = null;
        Response<PutNotificationConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutNotificationConfigurationRequestMarshaller().marshall(super.beforeMarshalling(putNotificationConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutNotificationConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutNotificationConfigurationResult> responseHandler = new StaxResponseHandler<PutNotificationConfigurationResult>(
                    new PutNotificationConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
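    // Usage sketch (not part of the generated client): sending launch and terminate notifications to an SNS
    // topic. The topic ARN (region/account elided) and group name are illustrative assumptions.
    //
    //     autoScaling.putNotificationConfiguration(new PutNotificationConfigurationRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withTopicARN("arn:aws:sns:...:my-asg-events")
    //             .withNotificationTypes("autoscaling:EC2_INSTANCE_LAUNCH", "autoscaling:EC2_INSTANCE_TERMINATE"));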
/**
*
* Creates or updates a scaling policy for an Auto Scaling group. Scaling policies are used to scale an Auto Scaling
* group based on configurable metrics. If no policies are defined, the dynamic scaling and predictive scaling
* features are not used.
*
*
* For more information about using dynamic scaling, see Target tracking
* scaling policies and Step and simple scaling
* policies in the Amazon EC2 Auto Scaling User Guide.
*
*
* For more information about using predictive scaling, see Predictive
* scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* You can view the scaling policies for an Auto Scaling group using the DescribePolicies API call. If you
* are no longer using a scaling policy, you can delete it by calling the DeletePolicy API.
*
*
* @param putScalingPolicyRequest
* @return Result of the PutScalingPolicy operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.PutScalingPolicy
* @see AWS
* API Documentation
*/
@Override
public PutScalingPolicyResult putScalingPolicy(PutScalingPolicyRequest request) {
request = beforeClientExecution(request);
return executePutScalingPolicy(request);
}
@SdkInternalApi
final PutScalingPolicyResult executePutScalingPolicy(PutScalingPolicyRequest putScalingPolicyRequest) {
ExecutionContext executionContext = createExecutionContext(putScalingPolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutScalingPolicyRequest> request = null;
        Response<PutScalingPolicyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutScalingPolicyRequestMarshaller().marshall(super.beforeMarshalling(putScalingPolicyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutScalingPolicy");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutScalingPolicyResult> responseHandler = new StaxResponseHandler<PutScalingPolicyResult>(
                    new PutScalingPolicyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
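    // Usage sketch (not part of the generated client): creating a target tracking policy that keeps average group
    // CPU near 50 percent. The policy and group names are illustrative assumptions.
    //
    //     PutScalingPolicyResult policy = autoScaling.putScalingPolicy(new PutScalingPolicyRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withPolicyName("cpu-50-target-tracking")
    //             .withPolicyType("TargetTrackingScaling")
    //             .withTargetTrackingConfiguration(new TargetTrackingConfiguration()
    //                     .withPredefinedMetricSpecification(new PredefinedMetricSpecification()
    //                             .withPredefinedMetricType("ASGAverageCPUUtilization"))
    //                     .withTargetValue(50.0)));
    //     System.out.println("Policy ARN: " + policy.getPolicyARN());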
/**
*
* Creates or updates a scheduled scaling action for an Auto Scaling group.
*
*
* For more information, see Scheduled
* scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call.
* If you are no longer using a scheduled action, you can delete it by calling the DeleteScheduledAction API.
*
*
* If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.
*
*
* @param putScheduledUpdateGroupActionRequest
* @return Result of the PutScheduledUpdateGroupAction operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.PutScheduledUpdateGroupAction
* @see AWS API Documentation
*/
@Override
public PutScheduledUpdateGroupActionResult putScheduledUpdateGroupAction(PutScheduledUpdateGroupActionRequest request) {
request = beforeClientExecution(request);
return executePutScheduledUpdateGroupAction(request);
}
@SdkInternalApi
final PutScheduledUpdateGroupActionResult executePutScheduledUpdateGroupAction(PutScheduledUpdateGroupActionRequest putScheduledUpdateGroupActionRequest) {
ExecutionContext executionContext = createExecutionContext(putScheduledUpdateGroupActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutScheduledUpdateGroupActionRequest> request = null;
        Response<PutScheduledUpdateGroupActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutScheduledUpdateGroupActionRequestMarshaller().marshall(super.beforeMarshalling(putScheduledUpdateGroupActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutScheduledUpdateGroupAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutScheduledUpdateGroupActionResult> responseHandler = new StaxResponseHandler<PutScheduledUpdateGroupActionResult>(
                    new PutScheduledUpdateGroupActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
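    // Usage sketch (not part of the generated client): a recurring weekday-morning scale-out using a cron
    // expression (evaluated in UTC unless a time zone is configured). The action name, group name, and sizing
    // values are illustrative assumptions.
    //
    //     autoScaling.putScheduledUpdateGroupAction(new PutScheduledUpdateGroupActionRequest()
    //             .withAutoScalingGroupName("my-asg")
    //             .withScheduledActionName("weekday-morning-scale-out")
    //             .withRecurrence("0 7 * * 1-5")
    //             .withMinSize(2)
    //             .withMaxSize(10)
    //             .withDesiredCapacity(4));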
/**
*
* Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2
* instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto
* Scaling group can draw on the warm pool to meet its new desired capacity.
*
*
* This operation must be called from the Region in which the Auto Scaling group was created.
*
*
* You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer
* using a warm pool, you can delete it by calling the DeleteWarmPool API.
*
*
* For more information, see Warm pools for
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param putWarmPoolRequest
* @return Result of the PutWarmPool operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.PutWarmPool
* @see AWS API
* Documentation
*/
@Override
public PutWarmPoolResult putWarmPool(PutWarmPoolRequest request) {
request = beforeClientExecution(request);
return executePutWarmPool(request);
}
@SdkInternalApi
final PutWarmPoolResult executePutWarmPool(PutWarmPoolRequest putWarmPoolRequest) {
ExecutionContext executionContext = createExecutionContext(putWarmPoolRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutWarmPoolRequest> request = null;
        Response<PutWarmPoolResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutWarmPoolRequestMarshaller().marshall(super.beforeMarshalling(putWarmPoolRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutWarmPool");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(new PutWarmPoolResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the
* timeout by the length of time defined using the PutLifecycleHook API call.
*
*
* This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
*
*
* -
*
* (Optional) Create a launch template or launch configuration with a user data script that runs while an instance
* is in a wait state due to a lifecycle hook.
*
*
* -
*
* (Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when
* an instance is put into a wait state due to a lifecycle hook.
*
*
* -
*
* (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an
* Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
*
*
* -
*
* Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
*
*
* -
*
* If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.
*
*
* -
*
* If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API
* call.
*
*
*
*
* For more information, see Amazon EC2 Auto Scaling
* lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
*
*
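* A minimal usage sketch, assuming the fluent setters generated for RecordLifecycleActionHeartbeatRequest; the
* group name, hook name, and instance ID below are placeholders:
*
*   // Hypothetical example: extend the hook timeout for an instance that needs more preparation time
*   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
*   autoScaling.recordLifecycleActionHeartbeat(new RecordLifecycleActionHeartbeatRequest()
*           .withAutoScalingGroupName("my-asg")
*           .withLifecycleHookName("my-launch-hook")
*           .withInstanceId("i-0123456789abcdef0"));
*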
* @param recordLifecycleActionHeartbeatRequest
* @return Result of the RecordLifecycleActionHeartbeat operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.RecordLifecycleActionHeartbeat
* @see AWS API Documentation
*/
@Override
public RecordLifecycleActionHeartbeatResult recordLifecycleActionHeartbeat(RecordLifecycleActionHeartbeatRequest request) {
request = beforeClientExecution(request);
return executeRecordLifecycleActionHeartbeat(request);
}
@SdkInternalApi
final RecordLifecycleActionHeartbeatResult executeRecordLifecycleActionHeartbeat(RecordLifecycleActionHeartbeatRequest recordLifecycleActionHeartbeatRequest) {
ExecutionContext executionContext = createExecutionContext(recordLifecycleActionHeartbeatRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request request = null;
Response response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new RecordLifecycleActionHeartbeatRequestMarshaller().marshall(super.beforeMarshalling(recordLifecycleActionHeartbeatRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "RecordLifecycleActionHeartbeat");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(
new RecordLifecycleActionHeartbeatResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Resumes the specified suspended auto scaling processes, or all suspended processes, for the specified Auto
* Scaling group.
*
*
* For more information, see Suspend and resume
* Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide.
*
*
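* A minimal usage sketch, assuming the fluent setters generated for ResumeProcessesRequest; the group name and
* process names below are illustrative (omitting withScalingProcesses resumes all suspended processes):
*
*   // Hypothetical example: resume only the Launch and Terminate processes for "my-asg"
*   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
*   autoScaling.resumeProcesses(new ResumeProcessesRequest()
*           .withAutoScalingGroupName("my-asg")
*           .withScalingProcesses("Launch", "Terminate"));
*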
* @param resumeProcessesRequest
* @return Result of the ResumeProcesses operation returned by the service.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.ResumeProcesses
* @see AWS
* API Documentation
*/
@Override
public ResumeProcessesResult resumeProcesses(ResumeProcessesRequest request) {
request = beforeClientExecution(request);
return executeResumeProcesses(request);
}
@SdkInternalApi
final ResumeProcessesResult executeResumeProcesses(ResumeProcessesRequest resumeProcessesRequest) {
ExecutionContext executionContext = createExecutionContext(resumeProcessesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request request = null;
Response response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ResumeProcessesRequestMarshaller().marshall(super.beforeMarshalling(resumeProcessesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ResumeProcesses");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(
new ResumeProcessesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Cancels an instance refresh that is in progress and rolls back any changes that it made. Amazon EC2 Auto Scaling
* replaces any instances that were replaced during the instance refresh. This restores your Auto Scaling group to
* the configuration that it was using before the start of the instance refresh.
*
*
* This operation is part of the instance refresh
* feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you
* make configuration changes.
*
*
* A rollback is not supported in the following situations:
*
*
* -
*
* There is no desired configuration specified for the instance refresh.
*
*
* -
*
* The Auto Scaling group has a launch template that uses an Amazon Web Services Systems Manager parameter instead
* of an AMI ID for the ImageId property.
*
*
* -
*
* The Auto Scaling group uses the launch template's $Latest or $Default version.
*
*
*
*
* When you receive a successful response from this operation, Amazon EC2 Auto Scaling immediately begins replacing
* instances. You can check the status of this operation through the DescribeInstanceRefreshes API operation.
*
*
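* A minimal usage sketch, assuming the fluent setter generated for RollbackInstanceRefreshRequest; the group
* name below is a placeholder:
*
*   // Hypothetical example: roll back the in-progress instance refresh for "my-asg"
*   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
*   RollbackInstanceRefreshResult result = autoScaling.rollbackInstanceRefresh(
*           new RollbackInstanceRefreshRequest().withAutoScalingGroupName("my-asg"));
*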
* @param rollbackInstanceRefreshRequest
* @return Result of the RollbackInstanceRefresh operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ActiveInstanceRefreshNotFoundException
* The request failed because an active instance refresh or rollback for the specified Auto Scaling group
* was not found.
* @throws IrreversibleInstanceRefreshException
* The request failed because a desired configuration was not found or an incompatible launch template (uses
* a Systems Manager parameter instead of an AMI ID) or launch template version ($Latest or
* $Default) is present on the Auto Scaling group.
* @sample AmazonAutoScaling.RollbackInstanceRefresh
* @see AWS API Documentation
*/
@Override
public RollbackInstanceRefreshResult rollbackInstanceRefresh(RollbackInstanceRefreshRequest request) {
request = beforeClientExecution(request);
return executeRollbackInstanceRefresh(request);
}
@SdkInternalApi
final RollbackInstanceRefreshResult executeRollbackInstanceRefresh(RollbackInstanceRefreshRequest rollbackInstanceRefreshRequest) {
ExecutionContext executionContext = createExecutionContext(rollbackInstanceRefreshRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request request = null;
Response response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new RollbackInstanceRefreshRequestMarshaller().marshall(super.beforeMarshalling(rollbackInstanceRefreshRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "RollbackInstanceRefresh");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(
new RollbackInstanceRefreshResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Sets the size of the specified Auto Scaling group.
*
*
* If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the
* current size of the group, the Auto Scaling group uses its termination policy to determine which instances to
* terminate.
*
*
* For more information, see Manual
* scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
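* A minimal usage sketch, assuming the fluent setters generated for SetDesiredCapacityRequest; the group name
* and capacity below are illustrative:
*
*   // Hypothetical example: manually scale "my-asg" to four instances, honoring the group's cooldown period
*   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
*   autoScaling.setDesiredCapacity(new SetDesiredCapacityRequest()
*           .withAutoScalingGroupName("my-asg")
*           .withDesiredCapacity(4)
*           .withHonorCooldown(true));
*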
* @param setDesiredCapacityRequest
* @return Result of the SetDesiredCapacity operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SetDesiredCapacity
* @see AWS
* API Documentation
*/
@Override
public SetDesiredCapacityResult setDesiredCapacity(SetDesiredCapacityRequest request) {
request = beforeClientExecution(request);
return executeSetDesiredCapacity(request);
}
@SdkInternalApi
final SetDesiredCapacityResult executeSetDesiredCapacity(SetDesiredCapacityRequest setDesiredCapacityRequest) {
ExecutionContext executionContext = createExecutionContext(setDesiredCapacityRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request request = null;
Response response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SetDesiredCapacityRequestMarshaller().marshall(super.beforeMarshalling(setDesiredCapacityRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SetDesiredCapacity");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(
new SetDesiredCapacityResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Sets the health status of the specified instance.
*
*
* For more information, see Health checks
* for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
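* A minimal usage sketch, assuming the fluent setters generated for SetInstanceHealthRequest; the instance ID
* below is a placeholder:
*
*   // Hypothetical example: mark an instance Unhealthy so Amazon EC2 Auto Scaling replaces it
*   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
*   autoScaling.setInstanceHealth(new SetInstanceHealthRequest()
*           .withInstanceId("i-0123456789abcdef0")
*           .withHealthStatus("Unhealthy"));
*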
* @param setInstanceHealthRequest
* @return Result of the SetInstanceHealth operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SetInstanceHealth
* @see AWS
* API Documentation
*/
@Override
public SetInstanceHealthResult setInstanceHealth(SetInstanceHealthRequest request) {
request = beforeClientExecution(request);
return executeSetInstanceHealth(request);
}
@SdkInternalApi
final SetInstanceHealthResult executeSetInstanceHealth(SetInstanceHealthRequest setInstanceHealthRequest) {
ExecutionContext executionContext = createExecutionContext(setInstanceHealthRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request request = null;
Response response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SetInstanceHealthRequestMarshaller().marshall(super.beforeMarshalling(setInstanceHealthRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SetInstanceHealth");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(
new SetInstanceHealthResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Updates the instance protection settings of the specified instances. This operation cannot be called on instances
* in a warm pool.
*
*
* For more information, see Use
* instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.
*
*
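* A minimal usage sketch, assuming the fluent setters generated for SetInstanceProtectionRequest; the group
* name and instance IDs below are placeholders:
*
*   // Hypothetical example: protect two instances in "my-asg" from termination during scale-in
*   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.defaultClient();
*   autoScaling.setInstanceProtection(new SetInstanceProtectionRequest()
*           .withAutoScalingGroupName("my-asg")
*           .withInstanceIds("i-0123456789abcdef0", "i-0fedcba9876543210")
*           .withProtectedFromScaleIn(true));
*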
* @param setInstanceProtectionRequest
* @return Result of the SetInstanceProtection operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SetInstanceProtection
* @see AWS API Documentation
*/
@Override
public SetInstanceProtectionResult setInstanceProtection(SetInstanceProtectionRequest request) {
request = beforeClientExecution(request);
return executeSetInstanceProtection(request);
}
@SdkInternalApi
final SetInstanceProtectionResult executeSetInstanceProtection(SetInstanceProtectionRequest setInstanceProtectionRequest) {
ExecutionContext executionContext = createExecutionContext(setInstanceProtectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request request = null;
Response response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SetInstanceProtectionRequestMarshaller().marshall(super.beforeMarshalling(setInstanceProtectionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SetInstanceProtection");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler responseHandler = new StaxResponseHandler(
new SetInstanceProtectionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Starts an instance refresh.
*
*
* This operation is part of the