/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.autoscaling;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import javax.annotation.Generated;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.client.builder.AdvancedConfig;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder;
import com.amazonaws.services.autoscaling.waiters.AmazonAutoScalingWaiters;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.transform.*;
/**
* Client for accessing Auto Scaling. All service calls made using this client are blocking, and will not return until
* the service call completes.
*
* Amazon EC2 Auto Scaling
*
* Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined scaling
* policies, scheduled actions, and health checks.
*
*
* For more information about Amazon EC2 Auto Scaling, see the Amazon EC2 Auto
* Scaling User Guide. For information about granting IAM users required permissions for calls to Amazon EC2 Auto
* Scaling, see Granting IAM
* users required permissions for Amazon EC2 Auto Scaling resources in the Amazon EC2 Auto Scaling API
* Reference.
*
*/
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonAutoScalingClient extends AmazonWebServiceClient implements AmazonAutoScaling {
/** Provider for AWS credentials. */
private final AWSCredentialsProvider awsCredentialsProvider;
private static final Log log = LogFactory.getLog(AmazonAutoScaling.class);
/** Default signing name for the service. */
private static final String DEFAULT_SIGNING_NAME = "autoscaling";
private volatile AmazonAutoScalingWaiters waiters;
/** Client configuration factory providing ClientConfigurations tailored to this client */
protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();
private final AdvancedConfig advancedConfig;
/**
* List of exception unmarshallers for all modeled exceptions
*/
protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers = new ArrayList<Unmarshaller<AmazonServiceException, Node>>();
/**
* Constructs a new client to invoke service methods on Auto Scaling. A credentials provider chain will be used that
* searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @see DefaultAWSCredentialsProviderChain
* @deprecated use {@link AmazonAutoScalingClientBuilder#defaultClient()}
*/
@Deprecated
public AmazonAutoScalingClient() {
this(DefaultAWSCredentialsProviderChain.getInstance(), configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on Auto Scaling. A credentials provider chain will be used that
* searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
*
* @see DefaultAWSCredentialsProviderChain
* @deprecated use {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonAutoScalingClient(ClientConfiguration clientConfiguration) {
this(DefaultAWSCredentialsProviderChain.getInstance(), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentials
* The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} for example:
* {@code AmazonAutoScalingClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(awsCredentials)).build();}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentials awsCredentials) {
this(awsCredentials, configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials and
* client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentials
* The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
super(clientConfiguration);
this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
this.advancedConfig = AdvancedConfig.EMPTY;
init();
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials
* provider.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider) {
this(awsCredentialsProvider, configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials
* provider and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
this(awsCredentialsProvider, clientConfiguration, null);
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified AWS account credentials
* provider, client configuration options, and request metric collector.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to Auto Scaling (ex: proxy settings,
* retry counts, etc.).
* @param requestMetricCollector
* optional request metric collector
* @deprecated use {@link AmazonAutoScalingClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonAutoScalingClientBuilder#withClientConfiguration(ClientConfiguration)} and
* {@link AmazonAutoScalingClientBuilder#withMetricsCollector(RequestMetricCollector)}
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
super(clientConfiguration, requestMetricCollector);
this.awsCredentialsProvider = awsCredentialsProvider;
this.advancedConfig = AdvancedConfig.EMPTY;
init();
}
public static AmazonAutoScalingClientBuilder builder() {
return AmazonAutoScalingClientBuilder.standard();
}
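// Illustrative sketch (hand-added, not generated code): the builder is the supported way to construct this
// client; the region and credentials provider shown here are placeholder assumptions.
//
//   AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard()
//           .withRegion(Regions.US_WEST_2)
//           .withCredentials(DefaultAWSCredentialsProviderChain.getInstance())
//           .build();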
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonAutoScalingClient(AwsSyncClientParams clientParams) {
this(clientParams, false);
}
/**
* Constructs a new client to invoke service methods on Auto Scaling using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonAutoScalingClient(AwsSyncClientParams clientParams, boolean endpointDiscoveryEnabled) {
super(clientParams);
this.awsCredentialsProvider = clientParams.getCredentialsProvider();
this.advancedConfig = clientParams.getAdvancedConfig();
init();
}
private void init() {
exceptionUnmarshallers.add(new InstanceRefreshInProgressExceptionUnmarshaller());
exceptionUnmarshallers.add(new ScalingActivityInProgressExceptionUnmarshaller());
exceptionUnmarshallers.add(new InvalidNextTokenExceptionUnmarshaller());
exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller());
exceptionUnmarshallers.add(new ActiveInstanceRefreshNotFoundExceptionUnmarshaller());
exceptionUnmarshallers.add(new ResourceContentionExceptionUnmarshaller());
exceptionUnmarshallers.add(new ServiceLinkedRoleFailureExceptionUnmarshaller());
exceptionUnmarshallers.add(new ResourceInUseExceptionUnmarshaller());
exceptionUnmarshallers.add(new StandardErrorUnmarshaller(com.amazonaws.services.autoscaling.model.AmazonAutoScalingException.class));
setServiceNameIntern(DEFAULT_SIGNING_NAME);
setEndpointPrefix(ENDPOINT_PREFIX);
// calling this.setEndPoint(...) will also modify the signer accordingly
this.setEndpoint("https://autoscaling.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/autoscaling/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/autoscaling/request.handler2s"));
requestHandler2s.addAll(chainFactory.getGlobalHandlers());
}
/**
*
* Attaches one or more EC2 instances to the specified Auto Scaling group.
*
*
* When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of
* instances being attached. If the number of instances being attached plus the desired capacity of the group
* exceeds the maximum size of the group, the operation fails.
*
*
* If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with
* the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also
* registered with the target groups.
*
*
* For more information, see Attach EC2 instances to
* your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param attachInstancesRequest
* @return Result of the AttachInstances operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachInstances
* @see AWS
* API Documentation
*/
@Override
public AttachInstancesResult attachInstances(AttachInstancesRequest request) {
request = beforeClientExecution(request);
return executeAttachInstances(request);
}
@SdkInternalApi
final AttachInstancesResult executeAttachInstances(AttachInstancesRequest attachInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(attachInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachInstancesRequest> request = null;
Response<AttachInstancesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachInstancesRequestMarshaller().marshall(super.beforeMarshalling(attachInstancesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachInstances");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachInstancesResult> responseHandler = new StaxResponseHandler<AttachInstancesResult>(
new AttachInstancesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
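// Illustrative sketch (not generated code): a minimal AttachInstances call; the group name and instance ID are
// placeholders. Attaching an instance raises the group's desired capacity, as described in the Javadoc above.
//
//   autoScaling.attachInstances(new AttachInstancesRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withInstanceIds("i-0123456789abcdef0"));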
/**
*
* Attaches one or more target groups to the specified Auto Scaling group.
*
*
* This operation is used with the following load balancer types:
*
*
* -
*
* Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS.
*
*
* -
*
* Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP.
*
*
* -
*
* Gateway Load Balancer - Operates at the network layer (layer 3).
*
*
*
*
* To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To
* detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.
*
*
* For more information, see Elastic Load
* Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param attachLoadBalancerTargetGroupsRequest
* @return Result of the AttachLoadBalancerTargetGroups operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachLoadBalancerTargetGroups
* @see AWS API Documentation
*/
@Override
public AttachLoadBalancerTargetGroupsResult attachLoadBalancerTargetGroups(AttachLoadBalancerTargetGroupsRequest request) {
request = beforeClientExecution(request);
return executeAttachLoadBalancerTargetGroups(request);
}
@SdkInternalApi
final AttachLoadBalancerTargetGroupsResult executeAttachLoadBalancerTargetGroups(AttachLoadBalancerTargetGroupsRequest attachLoadBalancerTargetGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(attachLoadBalancerTargetGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachLoadBalancerTargetGroupsRequest> request = null;
Response<AttachLoadBalancerTargetGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachLoadBalancerTargetGroupsRequestMarshaller().marshall(super.beforeMarshalling(attachLoadBalancerTargetGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachLoadBalancerTargetGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachLoadBalancerTargetGroupsResult> responseHandler = new StaxResponseHandler<AttachLoadBalancerTargetGroupsResult>(
new AttachLoadBalancerTargetGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
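// Illustrative sketch (not generated code): attaching an existing target group by ARN; the group name and ARN
// are placeholders.
//
//   autoScaling.attachLoadBalancerTargetGroups(new AttachLoadBalancerTargetGroupsRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withTargetGroupARNs("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-tg/0123456789abcdef"));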
/**
*
*
* To attach an Application Load Balancer, Network Load Balancer, or Gateway Load Balancer, use the
* AttachLoadBalancerTargetGroups API operation instead.
*
*
*
* Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling
* registers the running instances with these Classic Load Balancers.
*
*
* To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach
* the load balancer from the Auto Scaling group, call the DetachLoadBalancers API.
*
*
* For more information, see Elastic Load
* Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param attachLoadBalancersRequest
* @return Result of the AttachLoadBalancers operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.AttachLoadBalancers
* @see AWS API Documentation
*/
@Override
public AttachLoadBalancersResult attachLoadBalancers(AttachLoadBalancersRequest request) {
request = beforeClientExecution(request);
return executeAttachLoadBalancers(request);
}
@SdkInternalApi
final AttachLoadBalancersResult executeAttachLoadBalancers(AttachLoadBalancersRequest attachLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(attachLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AttachLoadBalancersRequest> request = null;
Response<AttachLoadBalancersResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AttachLoadBalancersRequestMarshaller().marshall(super.beforeMarshalling(attachLoadBalancersRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AttachLoadBalancers");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<AttachLoadBalancersResult> responseHandler = new StaxResponseHandler<AttachLoadBalancersResult>(
new AttachLoadBalancersResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public AttachLoadBalancersResult attachLoadBalancers() {
return attachLoadBalancers(new AttachLoadBalancersRequest());
}
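// Illustrative sketch (not generated code): registering a Classic Load Balancer by name; both names are
// placeholders. For Application, Network, or Gateway Load Balancers, attachLoadBalancerTargetGroups is the
// operation to use, as the Javadoc above notes.
//
//   autoScaling.attachLoadBalancers(new AttachLoadBalancersRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withLoadBalancerNames("my-classic-lb"));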
/**
*
* Deletes one or more scheduled actions for the specified Auto Scaling group.
*
*
* @param batchDeleteScheduledActionRequest
* @return Result of the BatchDeleteScheduledAction operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.BatchDeleteScheduledAction
* @see AWS API Documentation
*/
@Override
public BatchDeleteScheduledActionResult batchDeleteScheduledAction(BatchDeleteScheduledActionRequest request) {
request = beforeClientExecution(request);
return executeBatchDeleteScheduledAction(request);
}
@SdkInternalApi
final BatchDeleteScheduledActionResult executeBatchDeleteScheduledAction(BatchDeleteScheduledActionRequest batchDeleteScheduledActionRequest) {
ExecutionContext executionContext = createExecutionContext(batchDeleteScheduledActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchDeleteScheduledActionRequest> request = null;
Response<BatchDeleteScheduledActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchDeleteScheduledActionRequestMarshaller().marshall(super.beforeMarshalling(batchDeleteScheduledActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchDeleteScheduledAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<BatchDeleteScheduledActionResult> responseHandler = new StaxResponseHandler<BatchDeleteScheduledActionResult>(
new BatchDeleteScheduledActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
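// Illustrative sketch (not generated code): deleting several scheduled actions in one request; the group and
// action names are placeholders.
//
//   autoScaling.batchDeleteScheduledAction(new BatchDeleteScheduledActionRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withScheduledActionNames("scale-out-morning", "scale-in-evening"));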
/**
*
* Creates or updates one or more scheduled scaling actions for an Auto Scaling group.
*
*
* @param batchPutScheduledUpdateGroupActionRequest
* @return Result of the BatchPutScheduledUpdateGroupAction operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.BatchPutScheduledUpdateGroupAction
* @see AWS API Documentation
*/
@Override
public BatchPutScheduledUpdateGroupActionResult batchPutScheduledUpdateGroupAction(BatchPutScheduledUpdateGroupActionRequest request) {
request = beforeClientExecution(request);
return executeBatchPutScheduledUpdateGroupAction(request);
}
@SdkInternalApi
final BatchPutScheduledUpdateGroupActionResult executeBatchPutScheduledUpdateGroupAction(
BatchPutScheduledUpdateGroupActionRequest batchPutScheduledUpdateGroupActionRequest) {
ExecutionContext executionContext = createExecutionContext(batchPutScheduledUpdateGroupActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchPutScheduledUpdateGroupActionRequest> request = null;
Response<BatchPutScheduledUpdateGroupActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchPutScheduledUpdateGroupActionRequestMarshaller()
.marshall(super.beforeMarshalling(batchPutScheduledUpdateGroupActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchPutScheduledUpdateGroupAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<BatchPutScheduledUpdateGroupActionResult> responseHandler = new StaxResponseHandler<BatchPutScheduledUpdateGroupActionResult>(
new BatchPutScheduledUpdateGroupActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
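// Illustrative sketch (not generated code): creating a recurring scheduled action in a batch; the names, cron
// expression, and capacity value are placeholder assumptions.
//
//   autoScaling.batchPutScheduledUpdateGroupAction(new BatchPutScheduledUpdateGroupActionRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withScheduledUpdateGroupActions(new ScheduledUpdateGroupActionRequest()
//                   .withScheduledActionName("scale-out-morning")
//                   .withRecurrence("0 8 * * *")
//                   .withDesiredCapacity(4)));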
/**
*
* Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have
* already been completed, but it prevents new replacements from being started.
*
*
* This operation is part of the instance refresh
* feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you
* make configuration changes.
*
*
* @param cancelInstanceRefreshRequest
* @return Result of the CancelInstanceRefresh operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ActiveInstanceRefreshNotFoundException
* The request failed because an active instance refresh for the specified Auto Scaling group was not found.
* @sample AmazonAutoScaling.CancelInstanceRefresh
* @see AWS API Documentation
*/
@Override
public CancelInstanceRefreshResult cancelInstanceRefresh(CancelInstanceRefreshRequest request) {
request = beforeClientExecution(request);
return executeCancelInstanceRefresh(request);
}
@SdkInternalApi
final CancelInstanceRefreshResult executeCancelInstanceRefresh(CancelInstanceRefreshRequest cancelInstanceRefreshRequest) {
ExecutionContext executionContext = createExecutionContext(cancelInstanceRefreshRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CancelInstanceRefreshRequest> request = null;
Response<CancelInstanceRefreshResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CancelInstanceRefreshRequestMarshaller().marshall(super.beforeMarshalling(cancelInstanceRefreshRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CancelInstanceRefresh");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CancelInstanceRefreshResult> responseHandler = new StaxResponseHandler<CancelInstanceRefreshResult>(
new CancelInstanceRefreshResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
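// Illustrative sketch (not generated code): cancelling the active instance refresh for a group; the group name
// is a placeholder, and the result carries the ID of the cancelled refresh.
//
//   CancelInstanceRefreshResult cancelResult = autoScaling.cancelInstanceRefresh(
//           new CancelInstanceRefreshRequest().withAutoScalingGroupName("my-asg"));
//   String refreshId = cancelResult.getInstanceRefreshId();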
/**
*
* Completes the lifecycle action for the specified token or instance with the specified result.
*
*
* This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
*
*
* -
*
* (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when
* Amazon EC2 Auto Scaling launches or terminates instances.
*
*
* -
*
* (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an
* Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
*
*
* -
*
* Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
*
*
* -
*
* If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
*
*
* -
*
* If you finish before the timeout period ends, complete the lifecycle action.
*
*
*
*
* For more information, see Amazon EC2 Auto Scaling
* lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param completeLifecycleActionRequest
* @return Result of the CompleteLifecycleAction operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.CompleteLifecycleAction
* @see AWS API Documentation
*/
@Override
public CompleteLifecycleActionResult completeLifecycleAction(CompleteLifecycleActionRequest request) {
request = beforeClientExecution(request);
return executeCompleteLifecycleAction(request);
}
@SdkInternalApi
final CompleteLifecycleActionResult executeCompleteLifecycleAction(CompleteLifecycleActionRequest completeLifecycleActionRequest) {
ExecutionContext executionContext = createExecutionContext(completeLifecycleActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CompleteLifecycleActionRequest> request = null;
Response<CompleteLifecycleActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CompleteLifecycleActionRequestMarshaller().marshall(super.beforeMarshalling(completeLifecycleActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CompleteLifecycleAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CompleteLifecycleActionResult> responseHandler = new StaxResponseHandler<CompleteLifecycleActionResult>(
new CompleteLifecycleActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
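// Illustrative sketch (not generated code): completing a launch lifecycle hook with a CONTINUE result; the
// group name, hook name, and instance ID are placeholders. The action can also be identified by its lifecycle
// action token instead of the instance ID.
//
//   autoScaling.completeLifecycleAction(new CompleteLifecycleActionRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withLifecycleHookName("my-launch-hook")
//           .withInstanceId("i-0123456789abcdef0")
//           .withLifecycleActionResult("CONTINUE"));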
/**
*
* We strongly recommend using a launch template when calling this operation to ensure full functionality for
* Amazon EC2 Auto Scaling and Amazon EC2.
*
*
* Creates an Auto Scaling group with the specified name and attributes.
*
*
* If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the
* DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling
* service quotas in the Amazon EC2 Auto Scaling User Guide.
*
*
* For introductory exercises for creating an Auto Scaling group, see Getting started with
* Amazon EC2 Auto Scaling and Tutorial: Set up a
* scaled and load-balanced application in the Amazon EC2 Auto Scaling User Guide. For more information,
* see Auto Scaling groups
* in the Amazon EC2 Auto Scaling User Guide.
*
*
* Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize).
* Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances
* policy that defines weights for the instance types, you must specify these sizes with the same units that you use
* for weighting instances.
*
*
* @param createAutoScalingGroupRequest
* @return Result of the CreateAutoScalingGroup operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.CreateAutoScalingGroup
* @see AWS API Documentation
*/
@Override
public CreateAutoScalingGroupResult createAutoScalingGroup(CreateAutoScalingGroupRequest request) {
request = beforeClientExecution(request);
return executeCreateAutoScalingGroup(request);
}
@SdkInternalApi
final CreateAutoScalingGroupResult executeCreateAutoScalingGroup(CreateAutoScalingGroupRequest createAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(createAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateAutoScalingGroupRequest> request = null;
Response<CreateAutoScalingGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAutoScalingGroupRequestMarshaller().marshall(super.beforeMarshalling(createAutoScalingGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAutoScalingGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CreateAutoScalingGroupResult> responseHandler = new StaxResponseHandler<CreateAutoScalingGroupResult>(
new CreateAutoScalingGroupResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
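// Illustrative sketch (not generated code): creating a group from a launch template, in line with the
// recommendation in the Javadoc above; every name, subnet ID, and size value is a placeholder assumption.
//
//   autoScaling.createAutoScalingGroup(new CreateAutoScalingGroupRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withLaunchTemplate(new LaunchTemplateSpecification()
//                   .withLaunchTemplateName("my-template")
//                   .withVersion("$Latest"))
//           .withMinSize(1)
//           .withMaxSize(4)
//           .withDesiredCapacity(2)
//           .withVPCZoneIdentifier("subnet-0123456789abcdef0"));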
/**
*
* Creates a launch configuration.
*
*
* If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the
* DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling
* service quotas in the Amazon EC2 Auto Scaling User Guide.
*
*
* For more information, see Launch configurations
* in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param createLaunchConfigurationRequest
* @return Result of the CreateLaunchConfiguration operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.CreateLaunchConfiguration
* @see AWS API Documentation
*/
@Override
public CreateLaunchConfigurationResult createLaunchConfiguration(CreateLaunchConfigurationRequest request) {
request = beforeClientExecution(request);
return executeCreateLaunchConfiguration(request);
}
@SdkInternalApi
final CreateLaunchConfigurationResult executeCreateLaunchConfiguration(CreateLaunchConfigurationRequest createLaunchConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(createLaunchConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateLaunchConfigurationRequest> request = null;
Response<CreateLaunchConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateLaunchConfigurationRequestMarshaller().marshall(super.beforeMarshalling(createLaunchConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateLaunchConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CreateLaunchConfigurationResult> responseHandler = new StaxResponseHandler<CreateLaunchConfigurationResult>(
new CreateLaunchConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
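// Illustrative sketch (not generated code): a minimal launch configuration; the name, AMI ID, and instance type
// are placeholders.
//
//   autoScaling.createLaunchConfiguration(new CreateLaunchConfigurationRequest()
//           .withLaunchConfigurationName("my-launch-config")
//           .withImageId("ami-0123456789abcdef0")
//           .withInstanceType("t3.micro"));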
/**
*
* Creates or updates tags for the specified Auto Scaling group.
*
*
* When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and
* you do not get an error message.
*
*
* For more information, see Tagging Auto Scaling groups
* and instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param createOrUpdateTagsRequest
* @return Result of the CreateOrUpdateTags operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @sample AmazonAutoScaling.CreateOrUpdateTags
* @see AWS
* API Documentation
*/
@Override
public CreateOrUpdateTagsResult createOrUpdateTags(CreateOrUpdateTagsRequest request) {
request = beforeClientExecution(request);
return executeCreateOrUpdateTags(request);
}
@SdkInternalApi
final CreateOrUpdateTagsResult executeCreateOrUpdateTags(CreateOrUpdateTagsRequest createOrUpdateTagsRequest) {
ExecutionContext executionContext = createExecutionContext(createOrUpdateTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateOrUpdateTagsRequest> request = null;
Response<CreateOrUpdateTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateOrUpdateTagsRequestMarshaller().marshall(super.beforeMarshalling(createOrUpdateTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateOrUpdateTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<CreateOrUpdateTagsResult> responseHandler = new StaxResponseHandler<CreateOrUpdateTagsResult>(
new CreateOrUpdateTagsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
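// Illustrative sketch (not generated code): tagging a group so the tag propagates to instances at launch; the
// resource ID, key, and value are placeholders.
//
//   autoScaling.createOrUpdateTags(new CreateOrUpdateTagsRequest()
//           .withTags(new Tag()
//                   .withResourceId("my-asg")
//                   .withResourceType("auto-scaling-group")
//                   .withKey("environment")
//                   .withValue("test")
//                   .withPropagateAtLaunch(true)));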
/**
*
* Deletes the specified Auto Scaling group.
*
*
* If the group has instances or scaling activities in progress, you must specify the option to force the deletion
* in order for it to succeed.
*
*
* If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm
* that no longer has an associated action.
*
*
* To remove instances from the Auto Scaling group before deleting it, call the DetachInstances API with the
* list of instances and the option to decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling
* does not launch replacement instances.
*
*
* To terminate all instances before deleting the Auto Scaling group, call the UpdateAutoScalingGroup API and
* set the minimum size and desired capacity of the Auto Scaling group to zero.
*
*
* @param deleteAutoScalingGroupRequest
* @return Result of the DeleteAutoScalingGroup operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteAutoScalingGroup
* @see AWS API Documentation
*/
@Override
public DeleteAutoScalingGroupResult deleteAutoScalingGroup(DeleteAutoScalingGroupRequest request) {
request = beforeClientExecution(request);
return executeDeleteAutoScalingGroup(request);
}
@SdkInternalApi
final DeleteAutoScalingGroupResult executeDeleteAutoScalingGroup(DeleteAutoScalingGroupRequest deleteAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteAutoScalingGroupRequest> request = null;
Response<DeleteAutoScalingGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteAutoScalingGroupRequestMarshaller().marshall(super.beforeMarshalling(deleteAutoScalingGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteAutoScalingGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteAutoScalingGroupResult> responseHandler = new StaxResponseHandler<DeleteAutoScalingGroupResult>(
new DeleteAutoScalingGroupResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
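// Illustrative sketch (not generated code): force-deleting a group, which deletes it along with any remaining
// instances; the group name is a placeholder.
//
//   autoScaling.deleteAutoScalingGroup(new DeleteAutoScalingGroupRequest()
//           .withAutoScalingGroupName("my-asg")
//           .withForceDelete(true));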
/**
*
* Deletes the specified launch configuration.
*
*
* The launch configuration must not be attached to an Auto Scaling group. When this call completes, the launch
* configuration is no longer available for use.
*
*
* @param deleteLaunchConfigurationRequest
* @return Result of the DeleteLaunchConfiguration operation returned by the service.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteLaunchConfiguration
* @see AWS API Documentation
*/
@Override
public DeleteLaunchConfigurationResult deleteLaunchConfiguration(DeleteLaunchConfigurationRequest request) {
request = beforeClientExecution(request);
return executeDeleteLaunchConfiguration(request);
}
@SdkInternalApi
final DeleteLaunchConfigurationResult executeDeleteLaunchConfiguration(DeleteLaunchConfigurationRequest deleteLaunchConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteLaunchConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteLaunchConfigurationRequest> request = null;
Response<DeleteLaunchConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteLaunchConfigurationRequestMarshaller().marshall(super.beforeMarshalling(deleteLaunchConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteLaunchConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteLaunchConfigurationResult> responseHandler = new StaxResponseHandler<DeleteLaunchConfigurationResult>(
new DeleteLaunchConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified lifecycle hook.
*
*
* If there are any outstanding lifecycle actions, they are completed first (ABANDON for launching instances,
* CONTINUE for terminating instances).
*
*
* @param deleteLifecycleHookRequest
* @return Result of the DeleteLifecycleHook operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteLifecycleHook
* @see AWS API Documentation
*/
@Override
public DeleteLifecycleHookResult deleteLifecycleHook(DeleteLifecycleHookRequest request) {
request = beforeClientExecution(request);
return executeDeleteLifecycleHook(request);
}
@SdkInternalApi
final DeleteLifecycleHookResult executeDeleteLifecycleHook(DeleteLifecycleHookRequest deleteLifecycleHookRequest) {
ExecutionContext executionContext = createExecutionContext(deleteLifecycleHookRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteLifecycleHookRequest> request = null;
Response<DeleteLifecycleHookResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteLifecycleHookRequestMarshaller().marshall(super.beforeMarshalling(deleteLifecycleHookRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteLifecycleHook");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteLifecycleHookResult> responseHandler = new StaxResponseHandler<DeleteLifecycleHookResult>(
new DeleteLifecycleHookResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified notification.
*
*
* @param deleteNotificationConfigurationRequest
* @return Result of the DeleteNotificationConfiguration operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteNotificationConfiguration
* @see AWS API Documentation
*/
@Override
public DeleteNotificationConfigurationResult deleteNotificationConfiguration(DeleteNotificationConfigurationRequest request) {
request = beforeClientExecution(request);
return executeDeleteNotificationConfiguration(request);
}
@SdkInternalApi
final DeleteNotificationConfigurationResult executeDeleteNotificationConfiguration(
DeleteNotificationConfigurationRequest deleteNotificationConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteNotificationConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteNotificationConfigurationRequest> request = null;
Response<DeleteNotificationConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteNotificationConfigurationRequestMarshaller().marshall(super.beforeMarshalling(deleteNotificationConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteNotificationConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteNotificationConfigurationResult> responseHandler = new StaxResponseHandler<DeleteNotificationConfigurationResult>(
new DeleteNotificationConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified scaling policy.
*
*
* Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does
* not delete the alarm, even if it no longer has an associated action.
*
*
* For more information, see Deleting a scaling
* policy in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param deletePolicyRequest
* @return Result of the DeletePolicy operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.DeletePolicy
* @see AWS API
* Documentation
*/
@Override
public DeletePolicyResult deletePolicy(DeletePolicyRequest request) {
request = beforeClientExecution(request);
return executeDeletePolicy(request);
}
@SdkInternalApi
final DeletePolicyResult executeDeletePolicy(DeletePolicyRequest deletePolicyRequest) {
ExecutionContext executionContext = createExecutionContext(deletePolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeletePolicyRequest> request = null;
Response<DeletePolicyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeletePolicyRequestMarshaller().marshall(super.beforeMarshalling(deletePolicyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeletePolicy");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeletePolicyResult> responseHandler = new StaxResponseHandler<DeletePolicyResult>(new DeletePolicyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified scheduled action.
*
*
* @param deleteScheduledActionRequest
* @return Result of the DeleteScheduledAction operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DeleteScheduledAction
* @see AWS API Documentation
*/
@Override
public DeleteScheduledActionResult deleteScheduledAction(DeleteScheduledActionRequest request) {
request = beforeClientExecution(request);
return executeDeleteScheduledAction(request);
}
@SdkInternalApi
final DeleteScheduledActionResult executeDeleteScheduledAction(DeleteScheduledActionRequest deleteScheduledActionRequest) {
ExecutionContext executionContext = createExecutionContext(deleteScheduledActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteScheduledActionRequest> request = null;
Response<DeleteScheduledActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteScheduledActionRequestMarshaller().marshall(super.beforeMarshalling(deleteScheduledActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteScheduledAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteScheduledActionResult> responseHandler = new StaxResponseHandler<DeleteScheduledActionResult>(
new DeleteScheduledActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified tags.
*
*
* @param deleteTagsRequest
* @return Result of the DeleteTags operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @sample AmazonAutoScaling.DeleteTags
* @see AWS API
* Documentation
*/
@Override
public DeleteTagsResult deleteTags(DeleteTagsRequest request) {
request = beforeClientExecution(request);
return executeDeleteTags(request);
}
@SdkInternalApi
final DeleteTagsResult executeDeleteTags(DeleteTagsRequest deleteTagsRequest) {
ExecutionContext executionContext = createExecutionContext(deleteTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteTagsRequest> request = null;
Response<DeleteTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteTagsRequestMarshaller().marshall(super.beforeMarshalling(deleteTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteTagsResult> responseHandler = new StaxResponseHandler<DeleteTagsResult>(new DeleteTagsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the warm pool for the specified Auto Scaling group.
*
*
* For more information, see Warm pools for
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param deleteWarmPoolRequest
* @return Result of the DeleteWarmPool operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @sample AmazonAutoScaling.DeleteWarmPool
* @see AWS API
* Documentation
*/
@Override
public DeleteWarmPoolResult deleteWarmPool(DeleteWarmPoolRequest request) {
request = beforeClientExecution(request);
return executeDeleteWarmPool(request);
}
@SdkInternalApi
final DeleteWarmPoolResult executeDeleteWarmPool(DeleteWarmPoolRequest deleteWarmPoolRequest) {
ExecutionContext executionContext = createExecutionContext(deleteWarmPoolRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteWarmPoolRequest> request = null;
Response<DeleteWarmPoolResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteWarmPoolRequestMarshaller().marshall(super.beforeMarshalling(deleteWarmPoolRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteWarmPool");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DeleteWarmPoolResult> responseHandler = new StaxResponseHandler<DeleteWarmPoolResult>(
new DeleteWarmPoolResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the current Amazon EC2 Auto Scaling resource quotas for your account.
*
*
* When you establish an account, the account has initial quotas on the maximum number of Auto Scaling groups and
* launch configurations that you can create in a given Region. For more information, see Amazon EC2 Auto Scaling
* service quotas in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeAccountLimitsRequest
* @return Result of the DescribeAccountLimits operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAccountLimits
* @see AWS API Documentation
*/
@Override
public DescribeAccountLimitsResult describeAccountLimits(DescribeAccountLimitsRequest request) {
request = beforeClientExecution(request);
return executeDescribeAccountLimits(request);
}
@SdkInternalApi
final DescribeAccountLimitsResult executeDescribeAccountLimits(DescribeAccountLimitsRequest describeAccountLimitsRequest) {
ExecutionContext executionContext = createExecutionContext(describeAccountLimitsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAccountLimitsRequest> request = null;
Response<DescribeAccountLimitsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAccountLimitsRequestMarshaller().marshall(super.beforeMarshalling(describeAccountLimitsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAccountLimits");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAccountLimitsResult> responseHandler = new StaxResponseHandler<DescribeAccountLimitsResult>(
new DescribeAccountLimitsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAccountLimitsResult describeAccountLimits() {
return describeAccountLimits(new DescribeAccountLimitsRequest());
}
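/**
* Minimal usage sketch (not generated code): reads the account-level quotas returned by describeAccountLimits
* and compares them with current usage, as described in the documentation above. Purely illustrative.
*/
private static void sampleDescribeAccountLimitsUsage(AmazonAutoScaling client) {
    DescribeAccountLimitsResult limits = client.describeAccountLimits();
    // Compare current usage with the per-Region quotas.
    log.debug("Auto Scaling groups: " + limits.getNumberOfAutoScalingGroups() + " of "
            + limits.getMaxNumberOfAutoScalingGroups());
    log.debug("Launch configurations: " + limits.getNumberOfLaunchConfigurations() + " of "
            + limits.getMaxNumberOfLaunchConfigurations());
}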
/**
*
* Describes the available adjustment types for step scaling and simple scaling policies.
*
*
* The following adjustment types are supported:
*
* - ChangeInCapacity
*
* - ExactCapacity
*
* - PercentChangeInCapacity
*
*
* @param describeAdjustmentTypesRequest
* @return Result of the DescribeAdjustmentTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAdjustmentTypes
* @see AWS API Documentation
*/
@Override
public DescribeAdjustmentTypesResult describeAdjustmentTypes(DescribeAdjustmentTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeAdjustmentTypes(request);
}
@SdkInternalApi
final DescribeAdjustmentTypesResult executeDescribeAdjustmentTypes(DescribeAdjustmentTypesRequest describeAdjustmentTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAdjustmentTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAdjustmentTypesRequest> request = null;
Response<DescribeAdjustmentTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAdjustmentTypesRequestMarshaller().marshall(super.beforeMarshalling(describeAdjustmentTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAdjustmentTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAdjustmentTypesResult> responseHandler = new StaxResponseHandler<DescribeAdjustmentTypesResult>(
new DescribeAdjustmentTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAdjustmentTypesResult describeAdjustmentTypes() {
return describeAdjustmentTypes(new DescribeAdjustmentTypesRequest());
}
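/**
* Minimal usage sketch (not generated code): lists the adjustment types that step scaling and simple scaling
* policies can use, as enumerated in the documentation above.
*/
private static void sampleDescribeAdjustmentTypesUsage(AmazonAutoScaling client) {
    for (AdjustmentType type : client.describeAdjustmentTypes().getAdjustmentTypes()) {
        log.debug("Supported adjustment type: " + type.getAdjustmentType());
    }
}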
/**
*
* Gets information about the Auto Scaling groups in the account and Region.
*
*
* This operation returns information about instances in Auto Scaling groups. To retrieve information about the
* instances in a warm pool, you must call the DescribeWarmPool API.
*
*
* @param describeAutoScalingGroupsRequest
* @return Result of the DescribeAutoScalingGroups operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAutoScalingGroups
* @see AWS API Documentation
*/
@Override
public DescribeAutoScalingGroupsResult describeAutoScalingGroups(DescribeAutoScalingGroupsRequest request) {
request = beforeClientExecution(request);
return executeDescribeAutoScalingGroups(request);
}
@SdkInternalApi
final DescribeAutoScalingGroupsResult executeDescribeAutoScalingGroups(DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAutoScalingGroupsRequest> request = null;
Response<DescribeAutoScalingGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAutoScalingGroupsRequestMarshaller().marshall(super.beforeMarshalling(describeAutoScalingGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAutoScalingGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAutoScalingGroupsResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingGroupsResult>(
new DescribeAutoScalingGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAutoScalingGroupsResult describeAutoScalingGroups() {
return describeAutoScalingGroups(new DescribeAutoScalingGroupsRequest());
}
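/**
* Minimal usage sketch (not generated code): pages through describeAutoScalingGroups using the NextToken
* returned by the service. The group name "my-asg" is a placeholder.
*/
private static void sampleDescribeAutoScalingGroupsPaging(AmazonAutoScaling client) {
    DescribeAutoScalingGroupsRequest pageRequest = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames("my-asg");
    DescribeAutoScalingGroupsResult page;
    do {
        page = client.describeAutoScalingGroups(pageRequest);
        for (AutoScalingGroup group : page.getAutoScalingGroups()) {
            log.debug(group.getAutoScalingGroupName() + " desired capacity: " + group.getDesiredCapacity());
        }
        // A non-null NextToken means more pages remain.
        pageRequest.setNextToken(page.getNextToken());
    } while (page.getNextToken() != null);
}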
/**
*
* Gets information about the Auto Scaling instances in the account and Region.
*
*
* @param describeAutoScalingInstancesRequest
* @return Result of the DescribeAutoScalingInstances operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAutoScalingInstances
* @see AWS API Documentation
*/
@Override
public DescribeAutoScalingInstancesResult describeAutoScalingInstances(DescribeAutoScalingInstancesRequest request) {
request = beforeClientExecution(request);
return executeDescribeAutoScalingInstances(request);
}
@SdkInternalApi
final DescribeAutoScalingInstancesResult executeDescribeAutoScalingInstances(DescribeAutoScalingInstancesRequest describeAutoScalingInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAutoScalingInstancesRequest> request = null;
Response<DescribeAutoScalingInstancesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAutoScalingInstancesRequestMarshaller().marshall(super.beforeMarshalling(describeAutoScalingInstancesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAutoScalingInstances");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAutoScalingInstancesResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingInstancesResult>(
new DescribeAutoScalingInstancesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAutoScalingInstancesResult describeAutoScalingInstances() {
return describeAutoScalingInstances(new DescribeAutoScalingInstancesRequest());
}
/**
*
* Describes the notification types that are supported by Amazon EC2 Auto Scaling.
*
*
* @param describeAutoScalingNotificationTypesRequest
* @return Result of the DescribeAutoScalingNotificationTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeAutoScalingNotificationTypes
* @see AWS API Documentation
*/
@Override
public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes(DescribeAutoScalingNotificationTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeAutoScalingNotificationTypes(request);
}
@SdkInternalApi
final DescribeAutoScalingNotificationTypesResult executeDescribeAutoScalingNotificationTypes(
DescribeAutoScalingNotificationTypesRequest describeAutoScalingNotificationTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingNotificationTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeAutoScalingNotificationTypesRequest> request = null;
Response<DescribeAutoScalingNotificationTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeAutoScalingNotificationTypesRequestMarshaller().marshall(super
.beforeMarshalling(describeAutoScalingNotificationTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeAutoScalingNotificationTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeAutoScalingNotificationTypesResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingNotificationTypesResult>(
new DescribeAutoScalingNotificationTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes() {
return describeAutoScalingNotificationTypes(new DescribeAutoScalingNotificationTypesRequest());
}
/**
*
* Gets information about the instance refreshes for the specified Auto Scaling group.
*
*
* This operation is part of the instance refresh
* feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you
* make configuration changes.
*
*
* To help you determine the status of an instance refresh, this operation returns information about the instance
* refreshes you previously initiated, including their status, end time, the percentage of the instance refresh that
* is complete, and the number of instances remaining to update before the instance refresh is complete.
*
*
* The following are the possible statuses:
*
* - Pending - The request was created, but the operation has not started.
*
* - InProgress - The operation is in progress.
*
* - Successful - The operation completed successfully.
*
* - Failed - The operation failed to complete. You can troubleshoot using the status reason and the
*   scaling activities.
*
* - Cancelling - An ongoing operation is being cancelled. Cancellation does not roll back any
*   replacements that have already been completed, but it prevents new replacements from being started.
*
* - Cancelled - The operation is cancelled.
*
*
* @param describeInstanceRefreshesRequest
* @return Result of the DescribeInstanceRefreshes operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeInstanceRefreshes
* @see AWS API Documentation
*/
@Override
public DescribeInstanceRefreshesResult describeInstanceRefreshes(DescribeInstanceRefreshesRequest request) {
request = beforeClientExecution(request);
return executeDescribeInstanceRefreshes(request);
}
@SdkInternalApi
final DescribeInstanceRefreshesResult executeDescribeInstanceRefreshes(DescribeInstanceRefreshesRequest describeInstanceRefreshesRequest) {
ExecutionContext executionContext = createExecutionContext(describeInstanceRefreshesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeInstanceRefreshesRequest> request = null;
Response<DescribeInstanceRefreshesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeInstanceRefreshesRequestMarshaller().marshall(super.beforeMarshalling(describeInstanceRefreshesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeInstanceRefreshes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeInstanceRefreshesResult> responseHandler = new StaxResponseHandler<DescribeInstanceRefreshesResult>(
new DescribeInstanceRefreshesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
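/**
* Minimal usage sketch (not generated code): checks the status and completion percentage of the instance
* refreshes for one group, matching the statuses listed above. The group name "my-asg" is a placeholder.
*/
private static void sampleDescribeInstanceRefreshesUsage(AmazonAutoScaling client) {
    DescribeInstanceRefreshesResult refreshes = client.describeInstanceRefreshes(
            new DescribeInstanceRefreshesRequest().withAutoScalingGroupName("my-asg"));
    for (InstanceRefresh refresh : refreshes.getInstanceRefreshes()) {
        log.debug(refresh.getInstanceRefreshId() + " is " + refresh.getStatus() + " ("
                + refresh.getPercentageComplete() + "% complete)");
    }
}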
/**
*
* Gets information about the launch configurations in the account and Region.
*
*
* @param describeLaunchConfigurationsRequest
* @return Result of the DescribeLaunchConfigurations operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLaunchConfigurations
* @see AWS API Documentation
*/
@Override
public DescribeLaunchConfigurationsResult describeLaunchConfigurations(DescribeLaunchConfigurationsRequest request) {
request = beforeClientExecution(request);
return executeDescribeLaunchConfigurations(request);
}
@SdkInternalApi
final DescribeLaunchConfigurationsResult executeDescribeLaunchConfigurations(DescribeLaunchConfigurationsRequest describeLaunchConfigurationsRequest) {
ExecutionContext executionContext = createExecutionContext(describeLaunchConfigurationsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLaunchConfigurationsRequest> request = null;
Response<DescribeLaunchConfigurationsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLaunchConfigurationsRequestMarshaller().marshall(super.beforeMarshalling(describeLaunchConfigurationsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLaunchConfigurations");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLaunchConfigurationsResult> responseHandler = new StaxResponseHandler<DescribeLaunchConfigurationsResult>(
new DescribeLaunchConfigurationsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeLaunchConfigurationsResult describeLaunchConfigurations() {
return describeLaunchConfigurations(new DescribeLaunchConfigurationsRequest());
}
/**
*
* Describes the available types of lifecycle hooks.
*
*
* The following hook types are supported:
*
* - autoscaling:EC2_INSTANCE_LAUNCHING
*
* - autoscaling:EC2_INSTANCE_TERMINATING
*
*
* @param describeLifecycleHookTypesRequest
* @return Result of the DescribeLifecycleHookTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLifecycleHookTypes
* @see AWS API Documentation
*/
@Override
public DescribeLifecycleHookTypesResult describeLifecycleHookTypes(DescribeLifecycleHookTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeLifecycleHookTypes(request);
}
@SdkInternalApi
final DescribeLifecycleHookTypesResult executeDescribeLifecycleHookTypes(DescribeLifecycleHookTypesRequest describeLifecycleHookTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeLifecycleHookTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLifecycleHookTypesRequest> request = null;
Response<DescribeLifecycleHookTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLifecycleHookTypesRequestMarshaller().marshall(super.beforeMarshalling(describeLifecycleHookTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLifecycleHookTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLifecycleHookTypesResult> responseHandler = new StaxResponseHandler<DescribeLifecycleHookTypesResult>(
new DescribeLifecycleHookTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeLifecycleHookTypesResult describeLifecycleHookTypes() {
return describeLifecycleHookTypes(new DescribeLifecycleHookTypesRequest());
}
/**
*
* Gets information about the lifecycle hooks for the specified Auto Scaling group.
*
*
* @param describeLifecycleHooksRequest
* @return Result of the DescribeLifecycleHooks operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLifecycleHooks
* @see AWS API Documentation
*/
@Override
public DescribeLifecycleHooksResult describeLifecycleHooks(DescribeLifecycleHooksRequest request) {
request = beforeClientExecution(request);
return executeDescribeLifecycleHooks(request);
}
@SdkInternalApi
final DescribeLifecycleHooksResult executeDescribeLifecycleHooks(DescribeLifecycleHooksRequest describeLifecycleHooksRequest) {
ExecutionContext executionContext = createExecutionContext(describeLifecycleHooksRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLifecycleHooksRequest> request = null;
Response<DescribeLifecycleHooksResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLifecycleHooksRequestMarshaller().marshall(super.beforeMarshalling(describeLifecycleHooksRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLifecycleHooks");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLifecycleHooksResult> responseHandler = new StaxResponseHandler<DescribeLifecycleHooksResult>(
new DescribeLifecycleHooksResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Gets information about the load balancer target groups for the specified Auto Scaling group.
*
*
* To determine the availability of registered instances, use the State element in the response. When you attach a
* target group to an Auto Scaling group, the initial State value is Adding. The state transitions to Added after
* all Auto Scaling instances are registered with the target group. If Elastic Load Balancing health checks are
* enabled for the Auto Scaling group, the state transitions to InService after at least one Auto Scaling instance
* passes the health check. When the target group is in the InService state, Amazon EC2 Auto Scaling can terminate
* and replace any instances that are reported as unhealthy. If no registered instances pass the health checks, the
* target group doesn't enter the InService state.
*
*
* Target groups also have an InService state if you attach them in the CreateAutoScalingGroup API call. If your
* target group state is InService, but it is not working properly, check the scaling activities by calling
* DescribeScalingActivities and take any corrective actions necessary.
*
*
* For help with failed health checks, see Troubleshooting Amazon EC2
* Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Elastic Load
* Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeLoadBalancerTargetGroupsRequest
* @return Result of the DescribeLoadBalancerTargetGroups operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLoadBalancerTargetGroups
* @see AWS API Documentation
*/
@Override
public DescribeLoadBalancerTargetGroupsResult describeLoadBalancerTargetGroups(DescribeLoadBalancerTargetGroupsRequest request) {
request = beforeClientExecution(request);
return executeDescribeLoadBalancerTargetGroups(request);
}
@SdkInternalApi
final DescribeLoadBalancerTargetGroupsResult executeDescribeLoadBalancerTargetGroups(
DescribeLoadBalancerTargetGroupsRequest describeLoadBalancerTargetGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(describeLoadBalancerTargetGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLoadBalancerTargetGroupsRequest> request = null;
Response<DescribeLoadBalancerTargetGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLoadBalancerTargetGroupsRequestMarshaller().marshall(super.beforeMarshalling(describeLoadBalancerTargetGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLoadBalancerTargetGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLoadBalancerTargetGroupsResult> responseHandler = new StaxResponseHandler<DescribeLoadBalancerTargetGroupsResult>(
new DescribeLoadBalancerTargetGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
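/**
* Minimal usage sketch (not generated code): inspects the State of each target group attached to a group, as
* described in the documentation above. The group name "my-asg" is a placeholder.
*/
private static void sampleDescribeLoadBalancerTargetGroupsUsage(AmazonAutoScaling client) {
    DescribeLoadBalancerTargetGroupsResult targetGroups = client.describeLoadBalancerTargetGroups(
            new DescribeLoadBalancerTargetGroupsRequest().withAutoScalingGroupName("my-asg"));
    for (LoadBalancerTargetGroupState state : targetGroups.getLoadBalancerTargetGroups()) {
        // States progress from Adding to Added to InService, as documented above.
        log.debug(state.getLoadBalancerTargetGroupARN() + " is " + state.getState());
    }
}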
/**
*
* Gets information about the load balancers for the specified Auto Scaling group.
*
*
* This operation describes only Classic Load Balancers. If you have Application Load Balancers, Network Load
* Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.
*
*
* To determine the availability of registered instances, use the State element in the response. When you attach a
* load balancer to an Auto Scaling group, the initial State value is Adding. The state transitions to Added after
* all Auto Scaling instances are registered with the load balancer. If Elastic Load Balancing health checks are
* enabled for the Auto Scaling group, the state transitions to InService after at least one Auto Scaling instance
* passes the health check. When the load balancer is in the InService state, Amazon EC2 Auto Scaling can terminate
* and replace any instances that are reported as unhealthy. If no registered instances pass the health checks, the
* load balancer doesn't enter the InService state.
*
*
* Load balancers also have an InService state if you attach them in the CreateAutoScalingGroup API call. If your
* load balancer state is InService, but it is not working properly, check the scaling activities by calling
* DescribeScalingActivities and take any corrective actions necessary.
*
*
* For help with failed health checks, see Troubleshooting Amazon EC2
* Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Elastic Load
* Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeLoadBalancersRequest
* @return Result of the DescribeLoadBalancers operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeLoadBalancers
* @see AWS API Documentation
*/
@Override
public DescribeLoadBalancersResult describeLoadBalancers(DescribeLoadBalancersRequest request) {
request = beforeClientExecution(request);
return executeDescribeLoadBalancers(request);
}
@SdkInternalApi
final DescribeLoadBalancersResult executeDescribeLoadBalancers(DescribeLoadBalancersRequest describeLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(describeLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLoadBalancersRequest> request = null;
Response<DescribeLoadBalancersResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLoadBalancersRequestMarshaller().marshall(super.beforeMarshalling(describeLoadBalancersRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLoadBalancers");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeLoadBalancersResult> responseHandler = new StaxResponseHandler<DescribeLoadBalancersResult>(
new DescribeLoadBalancersResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
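/**
* Minimal usage sketch (not generated code): inspects the State of each Classic Load Balancer attached to a
* group. The group name "my-asg" is a placeholder.
*/
private static void sampleDescribeLoadBalancersUsage(AmazonAutoScaling client) {
    DescribeLoadBalancersResult loadBalancers = client.describeLoadBalancers(
            new DescribeLoadBalancersRequest().withAutoScalingGroupName("my-asg"));
    for (LoadBalancerState state : loadBalancers.getLoadBalancers()) {
        log.debug(state.getLoadBalancerName() + " is " + state.getState());
    }
}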
/**
*
* Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.
*
*
* The GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when
* calling the EnableMetricsCollection API.
*
*
* @param describeMetricCollectionTypesRequest
* @return Result of the DescribeMetricCollectionTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeMetricCollectionTypes
* @see AWS API Documentation
*/
@Override
public DescribeMetricCollectionTypesResult describeMetricCollectionTypes(DescribeMetricCollectionTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeMetricCollectionTypes(request);
}
@SdkInternalApi
final DescribeMetricCollectionTypesResult executeDescribeMetricCollectionTypes(DescribeMetricCollectionTypesRequest describeMetricCollectionTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeMetricCollectionTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeMetricCollectionTypesRequest> request = null;
Response<DescribeMetricCollectionTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeMetricCollectionTypesRequestMarshaller().marshall(super.beforeMarshalling(describeMetricCollectionTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeMetricCollectionTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeMetricCollectionTypesResult> responseHandler = new StaxResponseHandler<DescribeMetricCollectionTypesResult>(
new DescribeMetricCollectionTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeMetricCollectionTypesResult describeMetricCollectionTypes() {
return describeMetricCollectionTypes(new DescribeMetricCollectionTypesRequest());
}
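/**
* Minimal usage sketch (not generated code): lists the CloudWatch metrics and granularities that Amazon EC2
* Auto Scaling can collect. As noted above, GroupStandbyInstances is only returned after it is explicitly
* requested through EnableMetricsCollection.
*/
private static void sampleDescribeMetricCollectionTypesUsage(AmazonAutoScaling client) {
    DescribeMetricCollectionTypesResult types = client.describeMetricCollectionTypes();
    for (MetricCollectionType metric : types.getMetrics()) {
        log.debug("Metric: " + metric.getMetric());
    }
    for (MetricGranularityType granularity : types.getGranularities()) {
        log.debug("Granularity: " + granularity.getGranularity());
    }
}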
/**
*
* Gets information about the Amazon SNS notifications that are configured for one or more Auto Scaling groups.
*
*
* @param describeNotificationConfigurationsRequest
* @return Result of the DescribeNotificationConfigurations operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeNotificationConfigurations
* @see AWS API Documentation
*/
@Override
public DescribeNotificationConfigurationsResult describeNotificationConfigurations(DescribeNotificationConfigurationsRequest request) {
request = beforeClientExecution(request);
return executeDescribeNotificationConfigurations(request);
}
@SdkInternalApi
final DescribeNotificationConfigurationsResult executeDescribeNotificationConfigurations(
DescribeNotificationConfigurationsRequest describeNotificationConfigurationsRequest) {
ExecutionContext executionContext = createExecutionContext(describeNotificationConfigurationsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeNotificationConfigurationsRequest> request = null;
Response<DescribeNotificationConfigurationsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeNotificationConfigurationsRequestMarshaller()
.marshall(super.beforeMarshalling(describeNotificationConfigurationsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeNotificationConfigurations");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeNotificationConfigurationsResult> responseHandler = new StaxResponseHandler<DescribeNotificationConfigurationsResult>(
new DescribeNotificationConfigurationsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeNotificationConfigurationsResult describeNotificationConfigurations() {
return describeNotificationConfigurations(new DescribeNotificationConfigurationsRequest());
}
/**
*
* Gets information about the scaling policies in the account and Region.
*
*
* @param describePoliciesRequest
* @return Result of the DescribePolicies operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.DescribePolicies
* @see AWS
* API Documentation
*/
@Override
public DescribePoliciesResult describePolicies(DescribePoliciesRequest request) {
request = beforeClientExecution(request);
return executeDescribePolicies(request);
}
@SdkInternalApi
final DescribePoliciesResult executeDescribePolicies(DescribePoliciesRequest describePoliciesRequest) {
ExecutionContext executionContext = createExecutionContext(describePoliciesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribePoliciesRequest> request = null;
Response<DescribePoliciesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribePoliciesRequestMarshaller().marshall(super.beforeMarshalling(describePoliciesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribePolicies");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribePoliciesResult> responseHandler = new StaxResponseHandler<DescribePoliciesResult>(
new DescribePoliciesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribePoliciesResult describePolicies() {
return describePolicies(new DescribePoliciesRequest());
}
/**
*
* Gets information about the scaling activities in the account and Region.
*
*
* When scaling events occur, you see a record of the scaling activity in the scaling activities. For more
* information, see Verifying a scaling
* activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt
* to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the
* response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon
* EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeScalingActivitiesRequest
* @return Result of the DescribeScalingActivities operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeScalingActivities
* @see AWS API Documentation
*/
@Override
public DescribeScalingActivitiesResult describeScalingActivities(DescribeScalingActivitiesRequest request) {
request = beforeClientExecution(request);
return executeDescribeScalingActivities(request);
}
@SdkInternalApi
final DescribeScalingActivitiesResult executeDescribeScalingActivities(DescribeScalingActivitiesRequest describeScalingActivitiesRequest) {
ExecutionContext executionContext = createExecutionContext(describeScalingActivitiesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeScalingActivitiesRequest> request = null;
Response<DescribeScalingActivitiesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeScalingActivitiesRequestMarshaller().marshall(super.beforeMarshalling(describeScalingActivitiesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeScalingActivities");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeScalingActivitiesResult> responseHandler = new StaxResponseHandler<DescribeScalingActivitiesResult>(
new DescribeScalingActivitiesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeScalingActivitiesResult describeScalingActivities() {
return describeScalingActivities(new DescribeScalingActivitiesRequest());
}
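/**
* Minimal usage sketch (not generated code): reads the StatusCode and StatusMessage of recent scaling
* activities to diagnose failed launches, as described above. The group name "my-asg" is a placeholder.
*/
private static void sampleDescribeScalingActivitiesUsage(AmazonAutoScaling client) {
    DescribeScalingActivitiesResult activities = client.describeScalingActivities(
            new DescribeScalingActivitiesRequest().withAutoScalingGroupName("my-asg"));
    for (Activity activity : activities.getActivities()) {
        if ("Failed".equals(activity.getStatusCode()) || "Cancelled".equals(activity.getStatusCode())) {
            log.debug("Activity " + activity.getActivityId() + " did not succeed: " + activity.getStatusMessage());
        }
    }
}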
/**
*
* Describes the scaling process types for use with the ResumeProcesses and SuspendProcesses APIs.
*
*
* @param describeScalingProcessTypesRequest
* @return Result of the DescribeScalingProcessTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeScalingProcessTypes
* @see AWS API Documentation
*/
@Override
public DescribeScalingProcessTypesResult describeScalingProcessTypes(DescribeScalingProcessTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeScalingProcessTypes(request);
}
@SdkInternalApi
final DescribeScalingProcessTypesResult executeDescribeScalingProcessTypes(DescribeScalingProcessTypesRequest describeScalingProcessTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeScalingProcessTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeScalingProcessTypesRequest> request = null;
Response<DescribeScalingProcessTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeScalingProcessTypesRequestMarshaller().marshall(super.beforeMarshalling(describeScalingProcessTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeScalingProcessTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeScalingProcessTypesResult> responseHandler = new StaxResponseHandler<DescribeScalingProcessTypesResult>(
new DescribeScalingProcessTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeScalingProcessTypesResult describeScalingProcessTypes() {
return describeScalingProcessTypes(new DescribeScalingProcessTypesRequest());
}
/**
*
* Gets information about the scheduled actions that haven't run or that have not reached their end time.
*
*
* To describe the scaling activities for scheduled actions that have already run, call the
* DescribeScalingActivities API.
*
*
* @param describeScheduledActionsRequest
* @return Result of the DescribeScheduledActions operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeScheduledActions
* @see AWS API Documentation
*/
@Override
public DescribeScheduledActionsResult describeScheduledActions(DescribeScheduledActionsRequest request) {
request = beforeClientExecution(request);
return executeDescribeScheduledActions(request);
}
@SdkInternalApi
final DescribeScheduledActionsResult executeDescribeScheduledActions(DescribeScheduledActionsRequest describeScheduledActionsRequest) {
ExecutionContext executionContext = createExecutionContext(describeScheduledActionsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeScheduledActionsRequest> request = null;
Response<DescribeScheduledActionsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeScheduledActionsRequestMarshaller().marshall(super.beforeMarshalling(describeScheduledActionsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeScheduledActions");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeScheduledActionsResult> responseHandler = new StaxResponseHandler<DescribeScheduledActionsResult>(
new DescribeScheduledActionsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeScheduledActionsResult describeScheduledActions() {
return describeScheduledActions(new DescribeScheduledActionsRequest());
}
/**
*
* Describes the specified tags.
*
*
* You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling
* group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it
* to be included in the results.
*
*
* You can also specify multiple filters. The result includes information for a particular tag only if it matches
* all the filters. If there's no match, no special message is returned.
*
*
* For more information, see Tagging Auto Scaling groups
* and instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeTagsRequest
* @return Result of the DescribeTags operation returned by the service.
* @throws InvalidNextTokenException
* The NextToken value is not valid.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeTags
* @see AWS API
* Documentation
*/
@Override
public DescribeTagsResult describeTags(DescribeTagsRequest request) {
request = beforeClientExecution(request);
return executeDescribeTags(request);
}
@SdkInternalApi
final DescribeTagsResult executeDescribeTags(DescribeTagsRequest describeTagsRequest) {
ExecutionContext executionContext = createExecutionContext(describeTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTagsRequest> request = null;
Response<DescribeTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTagsRequestMarshaller().marshall(super.beforeMarshalling(describeTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<DescribeTagsResult> responseHandler = new StaxResponseHandler<DescribeTagsResult>(new DescribeTagsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeTagsResult describeTags() {
return describeTags(new DescribeTagsRequest());
}
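/**
* Minimal usage sketch (not generated code): limits the tag descriptions to a single Auto Scaling group using
* the "auto-scaling-group" filter, as described above. The group name "my-asg" is a placeholder.
*/
private static void sampleDescribeTagsUsage(AmazonAutoScaling client) {
    DescribeTagsResult tags = client.describeTags(new DescribeTagsRequest()
            .withFilters(new Filter().withName("auto-scaling-group").withValues("my-asg")));
    for (TagDescription tag : tags.getTags()) {
        log.debug(tag.getResourceId() + ": " + tag.getKey() + "=" + tag.getValue());
    }
}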
/**
*
* Describes the termination policies supported by Amazon EC2 Auto Scaling.
*
*
* For more information, see Controlling which Auto
* Scaling instances terminate during scale in in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeTerminationPolicyTypesRequest
* @return Result of the DescribeTerminationPolicyTypes operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeTerminationPolicyTypes
* @see AWS API Documentation
*/
@Override
public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes(DescribeTerminationPolicyTypesRequest request) {
request = beforeClientExecution(request);
return executeDescribeTerminationPolicyTypes(request);
}
@SdkInternalApi
final DescribeTerminationPolicyTypesResult executeDescribeTerminationPolicyTypes(DescribeTerminationPolicyTypesRequest describeTerminationPolicyTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeTerminationPolicyTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTerminationPolicyTypesRequest> request = null;
Response<DescribeTerminationPolicyTypesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTerminationPolicyTypesRequestMarshaller().marshall(super.beforeMarshalling(describeTerminationPolicyTypesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTerminationPolicyTypes");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeTerminationPolicyTypesResult> responseHandler = new StaxResponseHandler<DescribeTerminationPolicyTypesResult>(
new DescribeTerminationPolicyTypesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes() {
return describeTerminationPolicyTypes(new DescribeTerminationPolicyTypesRequest());
}
/**
*
* Gets information about a warm pool and its instances.
*
*
* For more information, see Warm pools for
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param describeWarmPoolRequest
* @return Result of the DescribeWarmPool operation returned by the service.
* @throws InvalidNextTokenException
     *         The NextToken value is not valid.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DescribeWarmPool
* @see AWS
* API Documentation
*/
@Override
public DescribeWarmPoolResult describeWarmPool(DescribeWarmPoolRequest request) {
request = beforeClientExecution(request);
return executeDescribeWarmPool(request);
}
@SdkInternalApi
final DescribeWarmPoolResult executeDescribeWarmPool(DescribeWarmPoolRequest describeWarmPoolRequest) {
ExecutionContext executionContext = createExecutionContext(describeWarmPoolRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeWarmPoolRequest> request = null;
        Response<DescribeWarmPoolResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeWarmPoolRequestMarshaller().marshall(super.beforeMarshalling(describeWarmPoolRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeWarmPool");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DescribeWarmPoolResult> responseHandler = new StaxResponseHandler<DescribeWarmPoolResult>(
new DescribeWarmPoolResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Removes one or more instances from the specified Auto Scaling group.
*
*
* After the instances are detached, you can manage them independent of the Auto Scaling group.
*
*
* If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to
* replace the ones that are detached.
*
*
* If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the
* load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from
* the target groups.
*
*
* For more information, see Detach EC2 instances from
* your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param detachInstancesRequest
* @return Result of the DetachInstances operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachInstances
* @see AWS
* API Documentation
*/
@Override
public DetachInstancesResult detachInstances(DetachInstancesRequest request) {
request = beforeClientExecution(request);
return executeDetachInstances(request);
}
@SdkInternalApi
final DetachInstancesResult executeDetachInstances(DetachInstancesRequest detachInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(detachInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachInstancesRequest> request = null;
        Response<DetachInstancesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachInstancesRequestMarshaller().marshall(super.beforeMarshalling(detachInstancesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachInstances");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachInstancesResult> responseHandler = new StaxResponseHandler<DetachInstancesResult>(
new DetachInstancesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
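    /**
     * Illustrative usage sketch only, not part of the generated client: shows one way a caller might build a
     * DetachInstancesRequest and detach an instance without having it replaced. The helper name, group name, and
     * instance ID are hypothetical placeholders.
     */
    private static DetachInstancesResult exampleDetachInstances(AmazonAutoScaling client) {
        DetachInstancesRequest exampleRequest = new DetachInstancesRequest()
                // Placeholder group name and instance ID.
                .withAutoScalingGroupName("my-asg")
                .withInstanceIds("i-0123456789abcdef0")
                // true: decrement the desired capacity so the detached instance is not replaced.
                .withShouldDecrementDesiredCapacity(true);
        return client.detachInstances(exampleRequest);
    }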
/**
*
* Detaches one or more target groups from the specified Auto Scaling group.
*
*
* @param detachLoadBalancerTargetGroupsRequest
* @return Result of the DetachLoadBalancerTargetGroups operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachLoadBalancerTargetGroups
* @see AWS API Documentation
*/
@Override
public DetachLoadBalancerTargetGroupsResult detachLoadBalancerTargetGroups(DetachLoadBalancerTargetGroupsRequest request) {
request = beforeClientExecution(request);
return executeDetachLoadBalancerTargetGroups(request);
}
@SdkInternalApi
final DetachLoadBalancerTargetGroupsResult executeDetachLoadBalancerTargetGroups(DetachLoadBalancerTargetGroupsRequest detachLoadBalancerTargetGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(detachLoadBalancerTargetGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachLoadBalancerTargetGroupsRequest> request = null;
        Response<DetachLoadBalancerTargetGroupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachLoadBalancerTargetGroupsRequestMarshaller().marshall(super.beforeMarshalling(detachLoadBalancerTargetGroupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachLoadBalancerTargetGroups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachLoadBalancerTargetGroupsResult> responseHandler = new StaxResponseHandler<DetachLoadBalancerTargetGroupsResult>(
new DetachLoadBalancerTargetGroupsResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Detaches one or more Classic Load Balancers from the specified Auto Scaling group.
*
*
* This operation detaches only Classic Load Balancers. If you have Application Load Balancers, Network Load
* Balancers, or Gateway Load Balancers, use the DetachLoadBalancerTargetGroups API instead.
*
*
     * When you detach a load balancer, it enters the Removing state while deregistering the instances in
* the group. When all instances are deregistered, then you can no longer describe the load balancer using the
* DescribeLoadBalancers API call. The instances remain running.
*
*
* @param detachLoadBalancersRequest
* @return Result of the DetachLoadBalancers operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DetachLoadBalancers
* @see AWS API Documentation
*/
@Override
public DetachLoadBalancersResult detachLoadBalancers(DetachLoadBalancersRequest request) {
request = beforeClientExecution(request);
return executeDetachLoadBalancers(request);
}
@SdkInternalApi
final DetachLoadBalancersResult executeDetachLoadBalancers(DetachLoadBalancersRequest detachLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(detachLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachLoadBalancersRequest> request = null;
        Response<DetachLoadBalancersResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DetachLoadBalancersRequestMarshaller().marshall(super.beforeMarshalling(detachLoadBalancersRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DetachLoadBalancers");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DetachLoadBalancersResult> responseHandler = new StaxResponseHandler<DetachLoadBalancersResult>(
new DetachLoadBalancersResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DetachLoadBalancersResult detachLoadBalancers() {
return detachLoadBalancers(new DetachLoadBalancersRequest());
}
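    /**
     * Illustrative usage sketch only, not part of the generated client: detaches a Classic Load Balancer from a
     * group. The helper name, group name, and load balancer name are hypothetical placeholders.
     */
    private static DetachLoadBalancersResult exampleDetachLoadBalancers(AmazonAutoScaling client) {
        DetachLoadBalancersRequest exampleRequest = new DetachLoadBalancersRequest()
                .withAutoScalingGroupName("my-asg")
                .withLoadBalancerNames("my-classic-load-balancer");
        return client.detachLoadBalancers(exampleRequest);
    }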
/**
*
* Disables group metrics for the specified Auto Scaling group.
*
*
* @param disableMetricsCollectionRequest
* @return Result of the DisableMetricsCollection operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.DisableMetricsCollection
* @see AWS API Documentation
*/
@Override
public DisableMetricsCollectionResult disableMetricsCollection(DisableMetricsCollectionRequest request) {
request = beforeClientExecution(request);
return executeDisableMetricsCollection(request);
}
@SdkInternalApi
final DisableMetricsCollectionResult executeDisableMetricsCollection(DisableMetricsCollectionRequest disableMetricsCollectionRequest) {
ExecutionContext executionContext = createExecutionContext(disableMetricsCollectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DisableMetricsCollectionRequest> request = null;
        Response<DisableMetricsCollectionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DisableMetricsCollectionRequestMarshaller().marshall(super.beforeMarshalling(disableMetricsCollectionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DisableMetricsCollection");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<DisableMetricsCollectionResult> responseHandler = new StaxResponseHandler<DisableMetricsCollectionResult>(
new DisableMetricsCollectionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring CloudWatch
* metrics for your Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param enableMetricsCollectionRequest
* @return Result of the EnableMetricsCollection operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.EnableMetricsCollection
* @see AWS API Documentation
*/
@Override
public EnableMetricsCollectionResult enableMetricsCollection(EnableMetricsCollectionRequest request) {
request = beforeClientExecution(request);
return executeEnableMetricsCollection(request);
}
@SdkInternalApi
final EnableMetricsCollectionResult executeEnableMetricsCollection(EnableMetricsCollectionRequest enableMetricsCollectionRequest) {
ExecutionContext executionContext = createExecutionContext(enableMetricsCollectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<EnableMetricsCollectionRequest> request = null;
        Response<EnableMetricsCollectionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new EnableMetricsCollectionRequestMarshaller().marshall(super.beforeMarshalling(enableMetricsCollectionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "EnableMetricsCollection");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<EnableMetricsCollectionResult> responseHandler = new StaxResponseHandler<EnableMetricsCollectionResult>(
new EnableMetricsCollectionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
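    /**
     * Illustrative usage sketch only, not part of the generated client: enables a subset of group metrics at the
     * one-minute granularity. The helper name and group name are hypothetical placeholders; omitting
     * withMetrics(...) would enable all group metrics.
     */
    private static EnableMetricsCollectionResult exampleEnableMetricsCollection(AmazonAutoScaling client) {
        EnableMetricsCollectionRequest exampleRequest = new EnableMetricsCollectionRequest()
                .withAutoScalingGroupName("my-asg")
                .withMetrics("GroupDesiredCapacity", "GroupInServiceInstances")
                .withGranularity("1Minute");
        return client.enableMetricsCollection(exampleRequest);
    }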
/**
*
* Moves the specified instances into the standby state.
*
*
* If you choose to decrement the desired capacity of the Auto Scaling group, the instances can enter standby as
* long as the desired capacity of the Auto Scaling group after the instances are placed into standby is equal to or
* greater than the minimum capacity of the group.
*
*
* If you choose not to decrement the desired capacity of the Auto Scaling group, the Auto Scaling group launches
* new instances to replace the instances on standby.
*
*
* For more information, see Temporarily removing
* instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param enterStandbyRequest
* @return Result of the EnterStandby operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.EnterStandby
* @see AWS API
* Documentation
*/
@Override
public EnterStandbyResult enterStandby(EnterStandbyRequest request) {
request = beforeClientExecution(request);
return executeEnterStandby(request);
}
@SdkInternalApi
final EnterStandbyResult executeEnterStandby(EnterStandbyRequest enterStandbyRequest) {
ExecutionContext executionContext = createExecutionContext(enterStandbyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<EnterStandbyRequest> request = null;
        Response<EnterStandbyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new EnterStandbyRequestMarshaller().marshall(super.beforeMarshalling(enterStandbyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "EnterStandby");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<EnterStandbyResult> responseHandler = new StaxResponseHandler<EnterStandbyResult>(new EnterStandbyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
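    /**
     * Illustrative usage sketch only, not part of the generated client: moves one instance into standby and
     * decrements the desired capacity so it is not replaced. The helper name, group name, and instance ID are
     * hypothetical placeholders.
     */
    private static EnterStandbyResult exampleEnterStandby(AmazonAutoScaling client) {
        EnterStandbyRequest exampleRequest = new EnterStandbyRequest()
                .withAutoScalingGroupName("my-asg")
                .withInstanceIds("i-0123456789abcdef0")
                .withShouldDecrementDesiredCapacity(true);
        return client.enterStandby(exampleRequest);
    }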
/**
*
* Executes the specified policy. This can be useful for testing the design of your scaling policy.
*
*
* @param executePolicyRequest
* @return Result of the ExecutePolicy operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.ExecutePolicy
* @see AWS API
* Documentation
*/
@Override
public ExecutePolicyResult executePolicy(ExecutePolicyRequest request) {
request = beforeClientExecution(request);
return executeExecutePolicy(request);
}
@SdkInternalApi
final ExecutePolicyResult executeExecutePolicy(ExecutePolicyRequest executePolicyRequest) {
ExecutionContext executionContext = createExecutionContext(executePolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ExecutePolicyRequest> request = null;
        Response<ExecutePolicyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExecutePolicyRequestMarshaller().marshall(super.beforeMarshalling(executePolicyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExecutePolicy");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<ExecutePolicyResult> responseHandler = new StaxResponseHandler<ExecutePolicyResult>(new ExecutePolicyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
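    /**
     * Illustrative usage sketch only, not part of the generated client: manually runs a scaling policy to test its
     * behavior, ignoring any cooldown period. The helper name, group name, and policy name are hypothetical
     * placeholders.
     */
    private static ExecutePolicyResult exampleExecutePolicy(AmazonAutoScaling client) {
        ExecutePolicyRequest exampleRequest = new ExecutePolicyRequest()
                .withAutoScalingGroupName("my-asg")
                .withPolicyName("my-scale-out-policy")
                // false: run the policy even if a previous scaling activity is still cooling down.
                .withHonorCooldown(false);
        return client.executePolicy(exampleRequest);
    }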
/**
*
* Moves the specified instances out of the standby state.
*
*
* After you put the instances back in service, the desired capacity is incremented.
*
*
* For more information, see Temporarily removing
* instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param exitStandbyRequest
* @return Result of the ExitStandby operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.ExitStandby
* @see AWS API
* Documentation
*/
@Override
public ExitStandbyResult exitStandby(ExitStandbyRequest request) {
request = beforeClientExecution(request);
return executeExitStandby(request);
}
@SdkInternalApi
final ExitStandbyResult executeExitStandby(ExitStandbyRequest exitStandbyRequest) {
ExecutionContext executionContext = createExecutionContext(exitStandbyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ExitStandbyRequest> request = null;
        Response<ExitStandbyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExitStandbyRequestMarshaller().marshall(super.beforeMarshalling(exitStandbyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExitStandby");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<ExitStandbyResult> responseHandler = new StaxResponseHandler<ExitStandbyResult>(new ExitStandbyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Retrieves the forecast data for a predictive scaling policy.
*
*
* Load forecasts are predictions of the hourly load values using historical load data from CloudWatch and an
* analysis of historical trends. Capacity forecasts are represented as predicted values for the minimum capacity
* that is needed on an hourly basis, based on the hourly load forecast.
*
*
* A minimum of 24 hours of data is required to create the initial forecasts. However, having a full 14 days of
* historical data results in more accurate forecasts.
*
*
* For more information, see Predictive
* scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param getPredictiveScalingForecastRequest
* @return Result of the GetPredictiveScalingForecast operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.GetPredictiveScalingForecast
* @see AWS API Documentation
*/
@Override
public GetPredictiveScalingForecastResult getPredictiveScalingForecast(GetPredictiveScalingForecastRequest request) {
request = beforeClientExecution(request);
return executeGetPredictiveScalingForecast(request);
}
@SdkInternalApi
final GetPredictiveScalingForecastResult executeGetPredictiveScalingForecast(GetPredictiveScalingForecastRequest getPredictiveScalingForecastRequest) {
ExecutionContext executionContext = createExecutionContext(getPredictiveScalingForecastRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<GetPredictiveScalingForecastRequest> request = null;
        Response<GetPredictiveScalingForecastResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new GetPredictiveScalingForecastRequestMarshaller().marshall(super.beforeMarshalling(getPredictiveScalingForecastRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetPredictiveScalingForecast");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<GetPredictiveScalingForecastResult> responseHandler = new StaxResponseHandler<GetPredictiveScalingForecastResult>(
new GetPredictiveScalingForecastResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
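    /**
     * Illustrative usage sketch only, not part of the generated client: retrieves the next two days of forecast
     * data for a predictive scaling policy. The helper name, group name, and policy name are hypothetical
     * placeholders.
     */
    private static GetPredictiveScalingForecastResult exampleGetPredictiveScalingForecast(AmazonAutoScaling client) {
        Date now = new Date();
        GetPredictiveScalingForecastRequest exampleRequest = new GetPredictiveScalingForecastRequest()
                .withAutoScalingGroupName("my-asg")
                .withPolicyName("my-predictive-scaling-policy")
                .withStartTime(now)
                // End time two days (in milliseconds) after the start time.
                .withEndTime(new Date(now.getTime() + 2L * 24 * 60 * 60 * 1000));
        return client.getPredictiveScalingForecast(exampleRequest);
    }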
/**
*
* Creates or updates a lifecycle hook for the specified Auto Scaling group.
*
*
* A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches
* (before it is put into service) or as the instance terminates (before it is fully terminated).
*
*
* This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
*
*
* -
*
* (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when
* Amazon EC2 Auto Scaling launches or terminates instances.
*
*
* -
*
* (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an
* Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
*
*
* -
*
* Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
*
*
* -
*
* If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using the
* RecordLifecycleActionHeartbeat API call.
*
*
* -
*
* If you finish before the timeout period ends, complete the lifecycle action using the
* CompleteLifecycleAction API call.
*
*
*
*
* For more information, see Amazon EC2 Auto Scaling
* lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call
* fails.
*
*
* You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If
* you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.
*
*
* @param putLifecycleHookRequest
* @return Result of the PutLifecycleHook operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.PutLifecycleHook
* @see AWS
* API Documentation
*/
@Override
public PutLifecycleHookResult putLifecycleHook(PutLifecycleHookRequest request) {
request = beforeClientExecution(request);
return executePutLifecycleHook(request);
}
@SdkInternalApi
final PutLifecycleHookResult executePutLifecycleHook(PutLifecycleHookRequest putLifecycleHookRequest) {
ExecutionContext executionContext = createExecutionContext(putLifecycleHookRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutLifecycleHookRequest> request = null;
        Response<PutLifecycleHookResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutLifecycleHookRequestMarshaller().marshall(super.beforeMarshalling(putLifecycleHookRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutLifecycleHook");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutLifecycleHookResult> responseHandler = new StaxResponseHandler<PutLifecycleHookResult>(
new PutLifecycleHookResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
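    /**
     * Illustrative usage sketch only, not part of the generated client: creates a launch lifecycle hook that keeps
     * new instances in a wait state for up to five minutes. The helper name, group name, and hook name are
     * hypothetical placeholders.
     */
    private static PutLifecycleHookResult examplePutLifecycleHook(AmazonAutoScaling client) {
        PutLifecycleHookRequest exampleRequest = new PutLifecycleHookRequest()
                .withAutoScalingGroupName("my-asg")
                .withLifecycleHookName("my-launch-hook")
                .withLifecycleTransition("autoscaling:EC2_INSTANCE_LAUNCHING")
                .withHeartbeatTimeout(300)
                // Continue the launch if the hook times out without CompleteLifecycleAction.
                .withDefaultResult("CONTINUE");
        return client.putLifecycleHook(exampleRequest);
    }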
/**
*
* Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the
* specified topic can have messages delivered to an endpoint such as a web server or an email address.
*
*
* This configuration overwrites any existing configuration.
*
*
* For more information, see Getting Amazon SNS
* notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.
*
*
* @param putNotificationConfigurationRequest
* @return Result of the PutNotificationConfiguration operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.PutNotificationConfiguration
* @see AWS API Documentation
*/
@Override
public PutNotificationConfigurationResult putNotificationConfiguration(PutNotificationConfigurationRequest request) {
request = beforeClientExecution(request);
return executePutNotificationConfiguration(request);
}
@SdkInternalApi
final PutNotificationConfigurationResult executePutNotificationConfiguration(PutNotificationConfigurationRequest putNotificationConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(putNotificationConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutNotificationConfigurationRequest> request = null;
        Response<PutNotificationConfigurationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutNotificationConfigurationRequestMarshaller().marshall(super.beforeMarshalling(putNotificationConfigurationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutNotificationConfiguration");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutNotificationConfigurationResult> responseHandler = new StaxResponseHandler<PutNotificationConfigurationResult>(
new PutNotificationConfigurationResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
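    /**
     * Illustrative usage sketch only, not part of the generated client: sends launch and terminate notifications
     * to an SNS topic. The helper name, group name, and topic ARN are hypothetical placeholders.
     */
    private static PutNotificationConfigurationResult examplePutNotificationConfiguration(AmazonAutoScaling client) {
        PutNotificationConfigurationRequest exampleRequest = new PutNotificationConfigurationRequest()
                .withAutoScalingGroupName("my-asg")
                .withTopicARN("arn:aws:sns:us-east-1:123456789012:my-asg-notifications")
                .withNotificationTypes("autoscaling:EC2_INSTANCE_LAUNCH", "autoscaling:EC2_INSTANCE_TERMINATE");
        return client.putNotificationConfiguration(exampleRequest);
    }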
/**
*
* Creates or updates a scaling policy for an Auto Scaling group. Scaling policies are used to scale an Auto Scaling
* group based on configurable metrics. If no policies are defined, the dynamic scaling and predictive scaling
* features are not used.
*
*
* For more information about using dynamic scaling, see Target tracking
* scaling policies and Step and simple scaling
* policies in the Amazon EC2 Auto Scaling User Guide.
*
*
* For more information about using predictive scaling, see Predictive
* scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* You can view the scaling policies for an Auto Scaling group using the DescribePolicies API call. If you
* are no longer using a scaling policy, you can delete it by calling the DeletePolicy API.
*
*
* @param putScalingPolicyRequest
* @return Result of the PutScalingPolicy operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.PutScalingPolicy
* @see AWS
* API Documentation
*/
@Override
public PutScalingPolicyResult putScalingPolicy(PutScalingPolicyRequest request) {
request = beforeClientExecution(request);
return executePutScalingPolicy(request);
}
@SdkInternalApi
final PutScalingPolicyResult executePutScalingPolicy(PutScalingPolicyRequest putScalingPolicyRequest) {
ExecutionContext executionContext = createExecutionContext(putScalingPolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutScalingPolicyRequest> request = null;
        Response<PutScalingPolicyResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutScalingPolicyRequestMarshaller().marshall(super.beforeMarshalling(putScalingPolicyRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutScalingPolicy");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutScalingPolicyResult> responseHandler = new StaxResponseHandler<PutScalingPolicyResult>(
new PutScalingPolicyResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
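    /**
     * Illustrative usage sketch only, not part of the generated client: creates a target tracking scaling policy
     * that keeps average CPU utilization near 50 percent. The helper name, group name, and policy name are
     * hypothetical placeholders.
     */
    private static PutScalingPolicyResult examplePutScalingPolicy(AmazonAutoScaling client) {
        PutScalingPolicyRequest exampleRequest = new PutScalingPolicyRequest()
                .withAutoScalingGroupName("my-asg")
                .withPolicyName("my-target-tracking-policy")
                .withPolicyType("TargetTrackingScaling")
                .withTargetTrackingConfiguration(new TargetTrackingConfiguration()
                        .withPredefinedMetricSpecification(new PredefinedMetricSpecification()
                                .withPredefinedMetricType("ASGAverageCPUUtilization"))
                        .withTargetValue(50.0));
        return client.putScalingPolicy(exampleRequest);
    }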
/**
*
* Creates or updates a scheduled scaling action for an Auto Scaling group.
*
*
* For more information, see Scheduled scaling in the
* Amazon EC2 Auto Scaling User Guide.
*
*
* You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call.
* If you are no longer using a scheduled action, you can delete it by calling the DeleteScheduledAction API.
*
*
* @param putScheduledUpdateGroupActionRequest
* @return Result of the PutScheduledUpdateGroupAction operation returned by the service.
* @throws AlreadyExistsException
* You already have an Auto Scaling group or launch configuration with this name.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.PutScheduledUpdateGroupAction
* @see AWS API Documentation
*/
@Override
public PutScheduledUpdateGroupActionResult putScheduledUpdateGroupAction(PutScheduledUpdateGroupActionRequest request) {
request = beforeClientExecution(request);
return executePutScheduledUpdateGroupAction(request);
}
@SdkInternalApi
final PutScheduledUpdateGroupActionResult executePutScheduledUpdateGroupAction(PutScheduledUpdateGroupActionRequest putScheduledUpdateGroupActionRequest) {
ExecutionContext executionContext = createExecutionContext(putScheduledUpdateGroupActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutScheduledUpdateGroupActionRequest> request = null;
        Response<PutScheduledUpdateGroupActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutScheduledUpdateGroupActionRequestMarshaller().marshall(super.beforeMarshalling(putScheduledUpdateGroupActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutScheduledUpdateGroupAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutScheduledUpdateGroupActionResult> responseHandler = new StaxResponseHandler<PutScheduledUpdateGroupActionResult>(
new PutScheduledUpdateGroupActionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
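    /**
     * Illustrative usage sketch only, not part of the generated client: schedules a recurring weekday-morning
     * scale-out. The helper name, group name, action name, and sizes are hypothetical placeholders.
     */
    private static PutScheduledUpdateGroupActionResult examplePutScheduledUpdateGroupAction(AmazonAutoScaling client) {
        PutScheduledUpdateGroupActionRequest exampleRequest = new PutScheduledUpdateGroupActionRequest()
                .withAutoScalingGroupName("my-asg")
                .withScheduledActionName("weekday-morning-scale-out")
                // Cron expression in UTC: 08:00 Monday through Friday.
                .withRecurrence("0 8 * * 1-5")
                .withMinSize(2)
                .withMaxSize(10)
                .withDesiredCapacity(6);
        return client.putScheduledUpdateGroupAction(exampleRequest);
    }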
/**
*
* Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2
* instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto
* Scaling group can draw on the warm pool to meet its new desired capacity. For more information and example
* configurations, see Warm pools for
* Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*
*
* This operation must be called from the Region in which the Auto Scaling group was created. This operation cannot
* be called on an Auto Scaling group that has a mixed instances policy or a launch template or launch configuration
* that requests Spot Instances.
*
*
* You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer
* using a warm pool, you can delete it by calling the DeleteWarmPool API.
*
*
* @param putWarmPoolRequest
* @return Result of the PutWarmPool operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.PutWarmPool
* @see AWS API
* Documentation
*/
@Override
public PutWarmPoolResult putWarmPool(PutWarmPoolRequest request) {
request = beforeClientExecution(request);
return executePutWarmPool(request);
}
@SdkInternalApi
final PutWarmPoolResult executePutWarmPool(PutWarmPoolRequest putWarmPoolRequest) {
ExecutionContext executionContext = createExecutionContext(putWarmPoolRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutWarmPoolRequest> request = null;
        Response<PutWarmPoolResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutWarmPoolRequestMarshaller().marshall(super.beforeMarshalling(putWarmPoolRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutWarmPool");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<PutWarmPoolResult> responseHandler = new StaxResponseHandler<PutWarmPoolResult>(new PutWarmPoolResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
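    /**
     * Illustrative usage sketch only, not part of the generated client: creates a warm pool of stopped,
     * pre-initialized instances for a group. The helper name, group name, and pool size are hypothetical
     * placeholders.
     */
    private static PutWarmPoolResult examplePutWarmPool(AmazonAutoScaling client) {
        PutWarmPoolRequest exampleRequest = new PutWarmPoolRequest()
                .withAutoScalingGroupName("my-asg")
                // Keep at least two pre-initialized instances on hand, kept in the Stopped state.
                .withMinSize(2)
                .withPoolState("Stopped");
        return client.putWarmPool(exampleRequest);
    }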
/**
*
* Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the
* timeout by the length of time defined using the PutLifecycleHook API call.
*
*
* This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
*
*
* -
*
* (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when
* Amazon EC2 Auto Scaling launches or terminates instances.
*
*
* -
*
* (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an
* Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
*
*
* -
*
* Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
*
*
* -
*
* If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
*
*
* -
*
* If you finish before the timeout period ends, complete the lifecycle action.
*
*
*
*
* For more information, see Amazon EC2 Auto Scaling
* lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param recordLifecycleActionHeartbeatRequest
* @return Result of the RecordLifecycleActionHeartbeat operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.RecordLifecycleActionHeartbeat
* @see AWS API Documentation
*/
@Override
public RecordLifecycleActionHeartbeatResult recordLifecycleActionHeartbeat(RecordLifecycleActionHeartbeatRequest request) {
request = beforeClientExecution(request);
return executeRecordLifecycleActionHeartbeat(request);
}
@SdkInternalApi
final RecordLifecycleActionHeartbeatResult executeRecordLifecycleActionHeartbeat(RecordLifecycleActionHeartbeatRequest recordLifecycleActionHeartbeatRequest) {
ExecutionContext executionContext = createExecutionContext(recordLifecycleActionHeartbeatRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<RecordLifecycleActionHeartbeatRequest> request = null;
        Response<RecordLifecycleActionHeartbeatResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new RecordLifecycleActionHeartbeatRequestMarshaller().marshall(super.beforeMarshalling(recordLifecycleActionHeartbeatRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "RecordLifecycleActionHeartbeat");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<RecordLifecycleActionHeartbeatResult> responseHandler = new StaxResponseHandler<RecordLifecycleActionHeartbeatResult>(
new RecordLifecycleActionHeartbeatResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
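    /**
     * Illustrative usage sketch only, not part of the generated client: extends the timeout of an in-progress
     * lifecycle action for a specific instance. The helper name, group name, hook name, and instance ID are
     * hypothetical placeholders.
     */
    private static RecordLifecycleActionHeartbeatResult exampleRecordLifecycleActionHeartbeat(AmazonAutoScaling client) {
        RecordLifecycleActionHeartbeatRequest exampleRequest = new RecordLifecycleActionHeartbeatRequest()
                .withAutoScalingGroupName("my-asg")
                .withLifecycleHookName("my-launch-hook")
                // Either the instance ID or the lifecycle action token identifies the pending action.
                .withInstanceId("i-0123456789abcdef0");
        return client.recordLifecycleActionHeartbeat(exampleRequest);
    }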
/**
*
     * Resumes the specified suspended auto scaling processes, or all suspended processes, for the specified Auto Scaling
* group.
*
*
* For more information, see Suspending and
* resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param resumeProcessesRequest
* @return Result of the ResumeProcesses operation returned by the service.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.ResumeProcesses
* @see AWS
* API Documentation
*/
@Override
public ResumeProcessesResult resumeProcesses(ResumeProcessesRequest request) {
request = beforeClientExecution(request);
return executeResumeProcesses(request);
}
@SdkInternalApi
final ResumeProcessesResult executeResumeProcesses(ResumeProcessesRequest resumeProcessesRequest) {
ExecutionContext executionContext = createExecutionContext(resumeProcessesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ResumeProcessesRequest> request = null;
        Response<ResumeProcessesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ResumeProcessesRequestMarshaller().marshall(super.beforeMarshalling(resumeProcessesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ResumeProcesses");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<ResumeProcessesResult> responseHandler = new StaxResponseHandler<ResumeProcessesResult>(
new ResumeProcessesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Sets the size of the specified Auto Scaling group.
*
*
     * If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the
* current size of the group, the Auto Scaling group uses its termination policy to determine which instances to
* terminate.
*
*
* For more information, see Manual scaling in the
* Amazon EC2 Auto Scaling User Guide.
*
*
* @param setDesiredCapacityRequest
* @return Result of the SetDesiredCapacity operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SetDesiredCapacity
* @see AWS
* API Documentation
*/
@Override
public SetDesiredCapacityResult setDesiredCapacity(SetDesiredCapacityRequest request) {
request = beforeClientExecution(request);
return executeSetDesiredCapacity(request);
}
@SdkInternalApi
final SetDesiredCapacityResult executeSetDesiredCapacity(SetDesiredCapacityRequest setDesiredCapacityRequest) {
ExecutionContext executionContext = createExecutionContext(setDesiredCapacityRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SetDesiredCapacityRequest> request = null;
        Response<SetDesiredCapacityResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SetDesiredCapacityRequestMarshaller().marshall(super.beforeMarshalling(setDesiredCapacityRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SetDesiredCapacity");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<SetDesiredCapacityResult> responseHandler = new StaxResponseHandler<SetDesiredCapacityResult>(
new SetDesiredCapacityResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
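    /**
     * Illustrative usage sketch only, not part of the generated client: manually sets the group size while
     * honoring the cooldown period. The helper name, group name, and capacity value are hypothetical placeholders.
     */
    private static SetDesiredCapacityResult exampleSetDesiredCapacity(AmazonAutoScaling client) {
        SetDesiredCapacityRequest exampleRequest = new SetDesiredCapacityRequest()
                .withAutoScalingGroupName("my-asg")
                .withDesiredCapacity(4)
                // true: wait for any cooldown period to complete before scaling.
                .withHonorCooldown(true);
        return client.setDesiredCapacity(exampleRequest);
    }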
/**
*
* Sets the health status of the specified instance.
*
*
* For more information, see Health
* checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.
*
*
* @param setInstanceHealthRequest
* @return Result of the SetInstanceHealth operation returned by the service.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SetInstanceHealth
* @see AWS
* API Documentation
*/
@Override
public SetInstanceHealthResult setInstanceHealth(SetInstanceHealthRequest request) {
request = beforeClientExecution(request);
return executeSetInstanceHealth(request);
}
@SdkInternalApi
final SetInstanceHealthResult executeSetInstanceHealth(SetInstanceHealthRequest setInstanceHealthRequest) {
ExecutionContext executionContext = createExecutionContext(setInstanceHealthRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SetInstanceHealthRequest> request = null;
        Response<SetInstanceHealthResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SetInstanceHealthRequestMarshaller().marshall(super.beforeMarshalling(setInstanceHealthRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SetInstanceHealth");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<SetInstanceHealthResult> responseHandler = new StaxResponseHandler<SetInstanceHealthResult>(
new SetInstanceHealthResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Updates the instance protection settings of the specified instances. This operation cannot be called on instances
* in a warm pool.
*
*
* For more information about preventing instances that are part of an Auto Scaling group from terminating on scale
* in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.
*
*
* If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.
*
*
* @param setInstanceProtectionRequest
* @return Result of the SetInstanceProtection operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SetInstanceProtection
* @see AWS API Documentation
*/
@Override
public SetInstanceProtectionResult setInstanceProtection(SetInstanceProtectionRequest request) {
request = beforeClientExecution(request);
return executeSetInstanceProtection(request);
}
@SdkInternalApi
final SetInstanceProtectionResult executeSetInstanceProtection(SetInstanceProtectionRequest setInstanceProtectionRequest) {
ExecutionContext executionContext = createExecutionContext(setInstanceProtectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SetInstanceProtectionRequest> request = null;
        Response<SetInstanceProtectionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SetInstanceProtectionRequestMarshaller().marshall(super.beforeMarshalling(setInstanceProtectionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SetInstanceProtection");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            StaxResponseHandler<SetInstanceProtectionResult> responseHandler = new StaxResponseHandler<SetInstanceProtectionResult>(
new SetInstanceProtectionResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
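    /**
     * Illustrative usage sketch only, not part of the generated client: protects two instances from termination
     * during scale-in events. The helper name, group name, and instance IDs are hypothetical placeholders.
     */
    private static SetInstanceProtectionResult exampleSetInstanceProtection(AmazonAutoScaling client) {
        SetInstanceProtectionRequest exampleRequest = new SetInstanceProtectionRequest()
                .withAutoScalingGroupName("my-asg")
                .withInstanceIds("i-0123456789abcdef0", "i-0fedcba9876543210")
                .withProtectedFromScaleIn(true);
        return client.setInstanceProtection(exampleRequest);
    }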
/**
*
* Starts a new instance refresh operation. An instance refresh performs a rolling replacement of all or some
* instances in an Auto Scaling group. Each instance is terminated first and then replaced, which temporarily
* reduces the capacity available within your Auto Scaling group.
*
*
* This operation is part of the instance refresh
* feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature
* is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch
* template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the
* process of updating instances in the group.
*
*
* If the call succeeds, it creates a new instance refresh request with a unique ID that you can use to track its
* progress. To query its status, or to describe instance refreshes that have already run, call the
* DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the
* CancelInstanceRefresh API.
*
*
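*
* A minimal usage sketch (not part of the generated documentation): the group name and refresh preferences are
* placeholder values, and the client is assumed to use default credentials and region.
*
* <pre>{@code
* AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard().build();
*
* // Start a rolling replacement, keeping at least 90% of the group in service during the refresh.
* StartInstanceRefreshResult result = autoScaling.startInstanceRefresh(new StartInstanceRefreshRequest()
*         .withAutoScalingGroupName("my-asg")
*         .withPreferences(new RefreshPreferences().withMinHealthyPercentage(90)));
*
* // The returned ID can be passed to DescribeInstanceRefreshes to track progress.
* String refreshId = result.getInstanceRefreshId();
* }</pre>
*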
* @param startInstanceRefreshRequest
* @return Result of the StartInstanceRefresh operation returned by the service.
* @throws LimitExceededException
* You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling
* groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws InstanceRefreshInProgressException
* The request failed because an active instance refresh operation already exists for the specified Auto
* Scaling group.
* @sample AmazonAutoScaling.StartInstanceRefresh
* @see AWS API Documentation
*/
@Override
public StartInstanceRefreshResult startInstanceRefresh(StartInstanceRefreshRequest request) {
request = beforeClientExecution(request);
return executeStartInstanceRefresh(request);
}
@SdkInternalApi
final StartInstanceRefreshResult executeStartInstanceRefresh(StartInstanceRefreshRequest startInstanceRefreshRequest) {
ExecutionContext executionContext = createExecutionContext(startInstanceRefreshRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<StartInstanceRefreshRequest> request = null;
Response<StartInstanceRefreshResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new StartInstanceRefreshRequestMarshaller().marshall(super.beforeMarshalling(startInstanceRefreshRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "StartInstanceRefresh");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<StartInstanceRefreshResult> responseHandler = new StaxResponseHandler<StartInstanceRefreshResult>(
        new StartInstanceRefreshResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group.
*
*
* If you suspend either the Launch or Terminate process types, it can prevent other
* process types from functioning properly. For more information, see Suspending and
* resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.
*
*
* To resume processes that have been suspended, call the ResumeProcesses API.
*
*
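*
* A minimal usage sketch (not part of the generated documentation): the group name is a placeholder value, and the
* process types shown are two of the standard suspendable process types.
*
* <pre>{@code
* AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard().build();
*
* // Suspend only the alarm- and schedule-driven scaling processes; all other processes keep running.
* autoScaling.suspendProcesses(new SuspendProcessesRequest()
*         .withAutoScalingGroupName("my-asg")
*         .withScalingProcesses("AlarmNotification", "ScheduledActions"));
*
* // Later, resume the same processes.
* autoScaling.resumeProcesses(new ResumeProcessesRequest()
*         .withAutoScalingGroupName("my-asg")
*         .withScalingProcesses("AlarmNotification", "ScheduledActions"));
* }</pre>
*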
* @param suspendProcessesRequest
* @return Result of the SuspendProcesses operation returned by the service.
* @throws ResourceInUseException
* The operation can't be performed because the resource is in use.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.SuspendProcesses
* @see AWS API Documentation
*/
@Override
public SuspendProcessesResult suspendProcesses(SuspendProcessesRequest request) {
request = beforeClientExecution(request);
return executeSuspendProcesses(request);
}
@SdkInternalApi
final SuspendProcessesResult executeSuspendProcesses(SuspendProcessesRequest suspendProcessesRequest) {
ExecutionContext executionContext = createExecutionContext(suspendProcessesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<SuspendProcessesRequest> request = null;
Response<SuspendProcessesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SuspendProcessesRequestMarshaller().marshall(super.beforeMarshalling(suspendProcessesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "SuspendProcesses");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<SuspendProcessesResult> responseHandler = new StaxResponseHandler<SuspendProcessesResult>(
        new SuspendProcessesResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called
* on instances in a warm pool.
*
*
* This call simply makes a termination request. The instance is not terminated immediately. When an instance is
* terminated, the instance status changes to terminated. You can't connect to or start an instance
* after you've terminated it.
*
*
* If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to
* replace the ones that are terminated.
*
*
* By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the
* desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto
* Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more
* information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.
*
*
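*
* A minimal usage sketch (not part of the generated documentation): the instance ID is a placeholder value, and
* the client is assumed to use default credentials and region.
*
* <pre>{@code
* AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard().build();
*
* // Terminate one instance and shrink the desired capacity by one so it is not replaced.
* TerminateInstanceInAutoScalingGroupResult result = autoScaling.terminateInstanceInAutoScalingGroup(
*         new TerminateInstanceInAutoScalingGroupRequest()
*                 .withInstanceId("i-0123456789abcdef0")
*                 .withShouldDecrementDesiredCapacity(true));
*
* // The returned scaling activity describes the termination request.
* Activity activity = result.getActivity();
* }</pre>
*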
* @param terminateInstanceInAutoScalingGroupRequest
* @return Result of the TerminateInstanceInAutoScalingGroup operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @sample AmazonAutoScaling.TerminateInstanceInAutoScalingGroup
* @see AWS API Documentation
*/
@Override
public TerminateInstanceInAutoScalingGroupResult terminateInstanceInAutoScalingGroup(TerminateInstanceInAutoScalingGroupRequest request) {
request = beforeClientExecution(request);
return executeTerminateInstanceInAutoScalingGroup(request);
}
@SdkInternalApi
final TerminateInstanceInAutoScalingGroupResult executeTerminateInstanceInAutoScalingGroup(
TerminateInstanceInAutoScalingGroupRequest terminateInstanceInAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(terminateInstanceInAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<TerminateInstanceInAutoScalingGroupRequest> request = null;
Response<TerminateInstanceInAutoScalingGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new TerminateInstanceInAutoScalingGroupRequestMarshaller().marshall(super
.beforeMarshalling(terminateInstanceInAutoScalingGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "TerminateInstanceInAutoScalingGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<TerminateInstanceInAutoScalingGroupResult> responseHandler = new StaxResponseHandler<TerminateInstanceInAutoScalingGroupResult>(
        new TerminateInstanceInAutoScalingGroupResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* We strongly recommend that all Auto Scaling groups use launch templates to ensure full functionality for
* Amazon EC2 Auto Scaling and Amazon EC2.
*
*
* Updates the configuration for the specified Auto Scaling group.
*
*
* To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any
* parameters that you don't specify are not changed by this update request. The new settings take effect on any
* scaling activities after this call returns.
*
*
* If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get
* the updated configuration. Existing instances continue to run with the configuration that they were originally
* launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or
* template, existing instances may be replaced to match the new purchasing options that you specified in the
* policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot
* capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances.
* When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that
* updating your group does not compromise the performance or availability of your application.
*
*
* Note the following about changing DesiredCapacity, MaxSize, or MinSize:
*
*
* -
*
* If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of
* the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.
*
*
* -
*
* If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is
* larger than the current size of the group, this sets the group's DesiredCapacity to the new MinSize value.
*
*
* -
*
* If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is
* smaller than the current size of the group, this sets the group's DesiredCapacity to the new MaxSize value.
*
*
*
*
* To see which parameters have been set, call the DescribeAutoScalingGroups API. To view the scaling
* policies for an Auto Scaling group, call the DescribePolicies API. If the group has scaling policies, you
* can update them by calling the PutScalingPolicy API.
*
*
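*
* A minimal usage sketch (not part of the generated documentation): the group name, launch template name, and
* sizes are placeholder values, and the client is assumed to use default credentials and region.
*
* <pre>{@code
* AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard().build();
*
* // Point the group at a newer launch template version and adjust its size limits.
* autoScaling.updateAutoScalingGroup(new UpdateAutoScalingGroupRequest()
*         .withAutoScalingGroupName("my-asg")
*         .withLaunchTemplate(new LaunchTemplateSpecification()
*                 .withLaunchTemplateName("my-launch-template")
*                 .withVersion("$Latest"))
*         .withMinSize(2)
*         .withMaxSize(10)
*         .withDesiredCapacity(4));
* }</pre>
*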
* @param updateAutoScalingGroupRequest
* @return Result of the UpdateAutoScalingGroup operation returned by the service.
* @throws ScalingActivityInProgressException
* The operation can't be performed because there are scaling activities in progress.
* @throws ResourceContentionException
* You already have a pending update to an Amazon EC2 Auto Scaling resource (for example, an Auto Scaling
* group, instance, or load balancer).
* @throws ServiceLinkedRoleFailureException
* The service-linked role is not yet ready for use.
* @sample AmazonAutoScaling.UpdateAutoScalingGroup
* @see AWS API Documentation
*/
@Override
public UpdateAutoScalingGroupResult updateAutoScalingGroup(UpdateAutoScalingGroupRequest request) {
request = beforeClientExecution(request);
return executeUpdateAutoScalingGroup(request);
}
@SdkInternalApi
final UpdateAutoScalingGroupResult executeUpdateAutoScalingGroup(UpdateAutoScalingGroupRequest updateAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(updateAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateAutoScalingGroupRequest> request = null;
Response<UpdateAutoScalingGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateAutoScalingGroupRequestMarshaller().marshall(super.beforeMarshalling(updateAutoScalingGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "Auto Scaling");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateAutoScalingGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
StaxResponseHandler<UpdateAutoScalingGroupResult> responseHandler = new StaxResponseHandler<UpdateAutoScalingGroupResult>(
        new UpdateAutoScalingGroupResultStaxUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* Returns additional metadata for a previously executed successful request, typically used for debugging issues
* where a service isn't acting as expected. This data isn't considered part of the result data returned by an
* operation, so it's available through this separate, diagnostic interface.
*
* Response metadata is only cached for a limited period of time, so if you need to access this extra diagnostic
* information for an executed request, you should use this method to retrieve it as soon as possible after
* executing the request.
*
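*
* A minimal usage sketch (not part of the generated documentation): any request previously executed against this
* client can be used; the describe request here is only an example.
*
* <pre>{@code
* AmazonAutoScaling autoScaling = AmazonAutoScalingClientBuilder.standard().build();
* DescribeAutoScalingGroupsRequest describeRequest = new DescribeAutoScalingGroupsRequest();
* autoScaling.describeAutoScalingGroups(describeRequest);
*
* // Retrieve the cached diagnostic metadata (for example, the AWS request ID) right after the call.
* ResponseMetadata metadata = autoScaling.getCachedResponseMetadata(describeRequest);
* String requestId = (metadata != null) ? metadata.getRequestId() : null;
* }</pre>
*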
* @param request
* The originally executed request
*
* @return The response metadata for the specified request, or null if none is available.
*/
public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
return client.getResponseMetadataForRequest(request);
}
/**
* Normal invoke with authentication. Credentials are required and may be overridden at the request level.
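*
* A caller-side sketch of such a request-level override (the request values and profile name are placeholders),
* using the setRequestCredentialsProvider hook available on every request object:
*
* <pre>{@code
* SetDesiredCapacityRequest request = new SetDesiredCapacityRequest()
*         .withAutoScalingGroupName("my-asg")
*         .withDesiredCapacity(3);
* // Sign this one call with credentials from a named profile instead of the client's default provider.
* request.setRequestCredentialsProvider(new ProfileCredentialsProvider("admin-profile"));
* }</pre>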
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
ExecutionContext executionContext) {
return invoke(request, responseHandler, executionContext, null, null);
}
/**
* Normal invoke with authentication. Credentials are required and may be overridden at the request level.
**/
private