
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.autoscaling;
import org.w3c.dom.*;
import java.util.*;
import java.util.Map.Entry;
import com.amazonaws.*;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.metrics.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.transform.*;
/**
* Client for accessing AmazonAutoScaling. All service calls made
* using this client are blocking, and will not return until the service call
* completes.
*
* Auto Scaling
* Auto Scaling is designed to automatically launch or terminate EC2
* instances based on user-defined policies, schedules, and health
* checks. Use this service in conjunction with the Amazon CloudWatch and
* Elastic Load Balancing services.
*
*/
public class AmazonAutoScalingClient extends AmazonWebServiceClient implements AmazonAutoScaling {
/** Provider for AWS credentials. */
private AWSCredentialsProvider awsCredentialsProvider;
/**
* List of exception unmarshallers for all AmazonAutoScaling exceptions.
*/
protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers
= new ArrayList<Unmarshaller<AmazonServiceException, Node>>();
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling. A credentials provider chain will be used
* that searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @see DefaultAWSCredentialsProviderChain
*/
@Deprecated
public AmazonAutoScalingClient() {
this(new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
}
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling. A credentials provider chain will be used
* that searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonAutoScaling
* (ex: proxy settings, retry counts, etc.).
*
* @see DefaultAWSCredentialsProviderChain
*/
@Deprecated
public AmazonAutoScalingClient(ClientConfiguration clientConfiguration) {
this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling using the specified AWS account credentials.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentials The AWS credentials (access key ID and secret key) to use
* when authenticating with AWS services.
*/
public AmazonAutoScalingClient(AWSCredentials awsCredentials) {
this(awsCredentials, new ClientConfiguration());
}
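/*
 * Usage sketch (illustrative only, not part of the shipped class): constructing a
 * client from long-lived credentials. The key values are placeholders; on Android a
 * credentials provider (for example Amazon Cognito) is generally preferable to
 * hard-coded keys.
 *
 * AWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY");
 * AmazonAutoScalingClient autoScaling =
 *         new AmazonAutoScalingClient(credentials, new ClientConfiguration());
 */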
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling using the specified AWS account credentials
* and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentials The AWS credentials (access key ID and secret key) to use
* when authenticating with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonAutoScaling
* (ex: proxy settings, retry counts, etc.).
*/
public AmazonAutoScalingClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
this(new StaticCredentialsProvider(awsCredentials), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling using the specified AWS account credentials provider.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
*/
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider) {
this(awsCredentialsProvider, new ClientConfiguration());
}
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling using the specified AWS account credentials
* provider and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonAutoScaling
* (ex: proxy settings, retry counts, etc.).
*/
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider,
ClientConfiguration clientConfiguration) {
this(awsCredentialsProvider, clientConfiguration, new UrlHttpClient(clientConfiguration));
}
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling using the specified AWS account credentials
* provider and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonAutoScaling
* (ex: proxy settings, retry counts, etc.).
* @param httpClient An HTTP client
*/
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider,
ClientConfiguration clientConfiguration, HttpClient httpClient) {
super(clientConfiguration, httpClient);
this.awsCredentialsProvider = awsCredentialsProvider;
init();
}
/**
* Constructs a new client to invoke service methods on
* AmazonAutoScaling using the specified AWS account credentials
* provider, client configuration options, and request metric collector.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonAutoScaling
* (ex: proxy settings, retry counts, etc.).
* @param requestMetricCollector optional request metric collector
*/
@Deprecated
public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider,
ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
super(clientConfiguration, requestMetricCollector);
this.awsCredentialsProvider = awsCredentialsProvider;
init();
}
private void init() {
exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
exceptionUnmarshallers.add(new InvalidNextTokenExceptionUnmarshaller());
exceptionUnmarshallers.add(new ResourceContentionExceptionUnmarshaller());
exceptionUnmarshallers.add(new ScalingActivityInProgressExceptionUnmarshaller());
exceptionUnmarshallers.add(new ResourceInUseExceptionUnmarshaller());
exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller());
exceptionUnmarshallers.add(new StandardErrorUnmarshaller());
// calling this.setEndpoint(...) will also modify the signer accordingly
this.setEndpoint("autoscaling.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain(
"/com/amazonaws/services/autoscaling/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain(
"/com/amazonaws/services/autoscaling/request.handler2s"));
}
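/*
 * Regional endpoints: init() defaults the client to autoscaling.amazonaws.com. A
 * sketch of pointing an already-constructed client at a specific region (the region
 * shown is just an example):
 *
 * AmazonAutoScalingClient autoScaling = new AmazonAutoScalingClient(credentialsProvider);
 * autoScaling.setEndpoint("autoscaling.us-west-2.amazonaws.com");
 */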
/**
*
* Creates or updates a lifecycle hook for the specified Auto Scaling
* Group.
*
*
* A lifecycle hook tells Auto Scaling that you want to perform an
* action on an instance that is not actively in service; for example,
* either when the instance launches or before the instance terminates.
*
*
* This operation is a part of the basic sequence for adding a lifecycle
* hook to an Auto Scaling group:
*
* - Create a notification target. A target can be either an
* Amazon SQS queue or an Amazon SNS topic.
* - Create an IAM role. This role allows Auto Scaling to publish
* lifecycle notifications to the designated SQS queue or SNS topic.
* - Create the lifecycle hook. You can create a hook that acts
* when instances launch or when instances terminate.
* - If necessary, record the lifecycle action heartbeat to keep the
* instance in a pending state.
* - Complete the lifecycle action.
*
* For more information, see
* Auto Scaling Pending State and Auto Scaling Terminating State
* in the Auto Scaling Developer Guide .
*
*
* If you exceed your maximum limit of lifecycle hooks, which by default
* is 50 per region, the call fails. For information about updating this
* limit, see
* AWS Service Limits
* in the Amazon Web Services General Reference .
*
*
* @param putLifecycleHookRequest Container for the necessary parameters
* to execute the PutLifecycleHook service method on AmazonAutoScaling.
*
* @return The response from the PutLifecycleHook service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public PutLifecycleHookResult putLifecycleHook(PutLifecycleHookRequest putLifecycleHookRequest) {
ExecutionContext executionContext = createExecutionContext(putLifecycleHookRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<PutLifecycleHookRequest> request = null;
Response<PutLifecycleHookResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new PutLifecycleHookRequestMarshaller().marshall(putLifecycleHookRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new PutLifecycleHookResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
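/*
 * Usage sketch for putLifecycleHook, given a constructed AmazonAutoScalingClient
 * named autoScaling. The group name, hook name, ARNs, and timeout are placeholders,
 * and the withXxx setters are assumed from the SDK's generated request builders.
 *
 * PutLifecycleHookRequest hookRequest = new PutLifecycleHookRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withLifecycleHookName("my-termination-hook")
 *         .withLifecycleTransition("autoscaling:EC2_INSTANCE_TERMINATING")
 *         .withNotificationTargetARN("arn:aws:sqs:us-east-1:123456789012:my-queue")
 *         .withRoleARN("arn:aws:iam::123456789012:role/my-lifecycle-role")
 *         .withHeartbeatTimeout(300);
 * autoScaling.putLifecycleHook(hookRequest);
 */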
/**
*
* Describes the current Auto Scaling resource limits for your AWS
* account.
*
*
* For information about requesting an increase in these limits, see
* AWS Service Limits
* in the Amazon Web Services General Reference .
*
*
* @param describeAccountLimitsRequest Container for the necessary
* parameters to execute the DescribeAccountLimits service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeAccountLimits service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAccountLimitsResult describeAccountLimits(DescribeAccountLimitsRequest describeAccountLimitsRequest) {
ExecutionContext executionContext = createExecutionContext(describeAccountLimitsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeAccountLimitsRequest> request = null;
Response<DescribeAccountLimitsResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeAccountLimitsRequestMarshaller().marshall(describeAccountLimitsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeAccountLimitsResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes one or more Auto Scaling groups. If a list of names is not
* provided, the call describes all Auto Scaling groups.
*
*
* @param describeAutoScalingGroupsRequest Container for the necessary
* parameters to execute the DescribeAutoScalingGroups service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeAutoScalingGroups service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAutoScalingGroupsResult describeAutoScalingGroups(DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingGroupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeAutoScalingGroupsRequest> request = null;
Response<DescribeAutoScalingGroupsResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeAutoScalingGroupsRequestMarshaller().marshall(describeAutoScalingGroupsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeAutoScalingGroupsResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
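/*
 * Usage sketch for describeAutoScalingGroups with pagination, given a client named
 * autoScaling. The group name is a placeholder; the withXxx/getXxx accessors and the
 * NextToken fields are assumed from the generated model classes.
 *
 * DescribeAutoScalingGroupsRequest describeRequest = new DescribeAutoScalingGroupsRequest()
 *         .withAutoScalingGroupNames("my-asg");
 * DescribeAutoScalingGroupsResult result;
 * do {
 *     result = autoScaling.describeAutoScalingGroups(describeRequest);
 *     for (AutoScalingGroup group : result.getAutoScalingGroups()) {
 *         // inspect group.getDesiredCapacity(), group.getInstances(), ...
 *     }
 *     describeRequest.setNextToken(result.getNextToken());
 * } while (result.getNextToken() != null);
 */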
/**
*
* Describes the policies for the specified Auto Scaling group.
*
*
* @param describePoliciesRequest Container for the necessary parameters
* to execute the DescribePolicies service method on AmazonAutoScaling.
*
* @return The response from the DescribePolicies service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribePoliciesResult describePolicies(DescribePoliciesRequest describePoliciesRequest) {
ExecutionContext executionContext = createExecutionContext(describePoliciesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribePoliciesRequest> request = null;
Response<DescribePoliciesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribePoliciesRequestMarshaller().marshall(describePoliciesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribePoliciesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an Auto Scaling group with the specified name and attributes.
*
*
* If you exceed your maximum limit of Auto Scaling groups, which by
* default is 20 per region, the call fails. For information about
* viewing and updating this limit, see DescribeAccountLimits.
*
*
* For more information, see
* Auto Scaling Groups
* in the Auto Scaling Developer Guide .
*
*
* @param createAutoScalingGroupRequest Container for the necessary
* parameters to execute the CreateAutoScalingGroup service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws LimitExceededException
* @throws AlreadyExistsException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void createAutoScalingGroup(CreateAutoScalingGroupRequest createAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(createAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<CreateAutoScalingGroupRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new CreateAutoScalingGroupRequestMarshaller().marshall(createAutoScalingGroupRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
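/*
 * Usage sketch for createAutoScalingGroup, given a client named autoScaling. All
 * names, sizes, and Availability Zones are placeholders, and a launch configuration
 * with the given name is assumed to exist already.
 *
 * CreateAutoScalingGroupRequest createRequest = new CreateAutoScalingGroupRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withLaunchConfigurationName("my-launch-config")
 *         .withAvailabilityZones("us-east-1a", "us-east-1b")
 *         .withMinSize(1)
 *         .withMaxSize(4)
 *         .withDesiredCapacity(2);
 * autoScaling.createAutoScalingGroup(createRequest);
 */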
/**
*
* Attaches one or more load balancers to the specified Auto Scaling
* group.
*
*
* To describe the load balancers for an Auto Scaling group, use
* DescribeLoadBalancers. To detach the load balancer from the Auto
* Scaling group, use DetachLoadBalancers.
*
*
* For more information, see
* Attach a Load Balancer to Your Auto Scaling Group
* in the Auto Scaling Developer Guide .
*
*
* @param attachLoadBalancersRequest Container for the necessary
* parameters to execute the AttachLoadBalancers service method on
* AmazonAutoScaling.
*
* @return The response from the AttachLoadBalancers service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public AttachLoadBalancersResult attachLoadBalancers(AttachLoadBalancersRequest attachLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(attachLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<AttachLoadBalancersRequest> request = null;
Response<AttachLoadBalancersResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new AttachLoadBalancersRequestMarshaller().marshall(attachLoadBalancersRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new AttachLoadBalancersResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
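/*
 * Usage sketch for attachLoadBalancers, given a client named autoScaling. The group
 * and load balancer names are placeholders; the withXxx setters are assumed from the
 * generated request builders.
 *
 * autoScaling.attachLoadBalancers(new AttachLoadBalancersRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withLoadBalancerNames("my-classic-elb"));
 */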
/**
*
* Deletes the specified notification.
*
*
* @param deleteNotificationConfigurationRequest Container for the
* necessary parameters to execute the DeleteNotificationConfiguration
* service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteNotificationConfiguration(DeleteNotificationConfigurationRequest deleteNotificationConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteNotificationConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteNotificationConfigurationRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeleteNotificationConfigurationRequestMarshaller().marshall(deleteNotificationConfigurationRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Describes the specified tags.
*
*
* You can use filters to limit the results. For example, you can query
* for the tags for a specific Auto Scaling group. You can specify
* multiple values for a filter. A tag must match at least one of the
* specified values for it to be included in the results.
*
*
* You can also specify multiple filters. The result includes
* information for a particular tag only if it matches all the filters.
* If there's no match, no special message is returned.
*
*
* @param describeTagsRequest Container for the necessary parameters to
* execute the DescribeTags service method on AmazonAutoScaling.
*
* @return The response from the DescribeTags service method, as returned
* by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeTagsResult describeTags(DescribeTagsRequest describeTagsRequest) {
ExecutionContext executionContext = createExecutionContext(describeTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeTagsRequest> request = null;
Response<DescribeTagsResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeTagsRequestMarshaller().marshall(describeTagsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeTagsResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
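/*
 * Usage sketch for describeTags, filtering by group name, given a client named
 * autoScaling. The group name is a placeholder, and the Filter/TagDescription
 * accessors are assumed from the generated model classes.
 *
 * DescribeTagsResult tagsResult = autoScaling.describeTags(new DescribeTagsRequest()
 *         .withFilters(new Filter().withName("auto-scaling-group").withValues("my-asg")));
 * for (TagDescription tag : tagsResult.getTags()) {
 *     // tag.getKey(), tag.getValue(), tag.getPropagateAtLaunch()
 * }
 */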
/**
*
* Configures an Auto Scaling group to send notifications when specified
* events take place. Subscribers to this topic can have messages for
* events delivered to an endpoint such as a web server or email address.
*
*
* For more information, see
* Getting Notifications When Your Auto Scaling Group Changes
* in the Auto Scaling Developer Guide .
*
*
* This configuration overwrites an existing configuration.
*
*
* @param putNotificationConfigurationRequest Container for the necessary
* parameters to execute the PutNotificationConfiguration service method
* on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void putNotificationConfiguration(PutNotificationConfigurationRequest putNotificationConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(putNotificationConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<PutNotificationConfigurationRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new PutNotificationConfigurationRequestMarshaller().marshall(putNotificationConfigurationRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Describes the available types of lifecycle hooks.
*
*
* @param describeLifecycleHookTypesRequest Container for the necessary
* parameters to execute the DescribeLifecycleHookTypes service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeLifecycleHookTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeLifecycleHookTypesResult describeLifecycleHookTypes(DescribeLifecycleHookTypesRequest describeLifecycleHookTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeLifecycleHookTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeLifecycleHookTypesRequest> request = null;
Response<DescribeLifecycleHookTypesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeLifecycleHookTypesRequestMarshaller().marshall(describeLifecycleHookTypesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeLifecycleHookTypesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Removes one or more instances from the specified Auto Scaling group.
*
*
* After the instances are detached, you can manage them independently
* from the rest of the Auto Scaling group.
*
*
* If you do not specify the option to decrement the desired capacity,
* Auto Scaling launches instances to replace the ones that are detached.
*
*
* For more information, see
* Detach EC2 Instances from Your Auto Scaling Group
* in the Auto Scaling Developer Guide .
*
*
* @param detachInstancesRequest Container for the necessary parameters
* to execute the DetachInstances service method on AmazonAutoScaling.
*
* @return The response from the DetachInstances service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DetachInstancesResult detachInstances(DetachInstancesRequest detachInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(detachInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DetachInstancesRequest> request = null;
Response<DetachInstancesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DetachInstancesRequestMarshaller().marshall(detachInstancesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DetachInstancesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
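/*
 * Usage sketch for detachInstances, decrementing the desired capacity so the detached
 * instance is not replaced, given a client named autoScaling. The instance ID and
 * group name are placeholders; the withXxx/getActivities accessors are assumed from
 * the generated model.
 *
 * DetachInstancesResult detachResult = autoScaling.detachInstances(new DetachInstancesRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withInstanceIds("i-0123456789abcdef0")
 *         .withShouldDecrementDesiredCapacity(true));
 * // detachResult.getActivities() describes the resulting scaling activity
 */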
/**
*
* Deletes the specified scheduled action.
*
*
* @param deleteScheduledActionRequest Container for the necessary
* parameters to execute the DeleteScheduledAction service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteScheduledAction(DeleteScheduledActionRequest deleteScheduledActionRequest) {
ExecutionContext executionContext = createExecutionContext(deleteScheduledActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteScheduledActionRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeleteScheduledActionRequestMarshaller().marshall(deleteScheduledActionRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Enables monitoring of the specified metrics for the specified Auto
* Scaling group.
*
*
* You can only enable metrics collection if InstanceMonitoring
* in the launch configuration for the group is set to True.
*
*
* @param enableMetricsCollectionRequest Container for the necessary
* parameters to execute the EnableMetricsCollection service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void enableMetricsCollection(EnableMetricsCollectionRequest enableMetricsCollectionRequest) {
ExecutionContext executionContext = createExecutionContext(enableMetricsCollectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<EnableMetricsCollectionRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new EnableMetricsCollectionRequestMarshaller().marshall(enableMetricsCollectionRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Creates a launch configuration.
*
*
* If you exceed your maximum limit of launch configurations, which by
* default is 100 per region, the call fails. For information about
* viewing and updating this limit, see DescribeAccountLimits.
*
*
* For more information, see
* Launch Configurations
* in the Auto Scaling Developer Guide .
*
*
* @param createLaunchConfigurationRequest Container for the necessary
* parameters to execute the CreateLaunchConfiguration service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws LimitExceededException
* @throws AlreadyExistsException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void createLaunchConfiguration(CreateLaunchConfigurationRequest createLaunchConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(createLaunchConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<CreateLaunchConfigurationRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new CreateLaunchConfigurationRequestMarshaller().marshall(createLaunchConfigurationRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Describes one or more Auto Scaling instances. If a list is not
* provided, the call describes all instances.
*
*
* @param describeAutoScalingInstancesRequest Container for the necessary
* parameters to execute the DescribeAutoScalingInstances service method
* on AmazonAutoScaling.
*
* @return The response from the DescribeAutoScalingInstances service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAutoScalingInstancesResult describeAutoScalingInstances(DescribeAutoScalingInstancesRequest describeAutoScalingInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeAutoScalingInstancesRequest> request = null;
Response<DescribeAutoScalingInstancesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeAutoScalingInstancesRequestMarshaller().marshall(describeAutoScalingInstancesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeAutoScalingInstancesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the lifecycle hooks for the specified Auto Scaling group.
*
*
* @param describeLifecycleHooksRequest Container for the necessary
* parameters to execute the DescribeLifecycleHooks service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeLifecycleHooks service method,
* as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeLifecycleHooksResult describeLifecycleHooks(DescribeLifecycleHooksRequest describeLifecycleHooksRequest) {
ExecutionContext executionContext = createExecutionContext(describeLifecycleHooksRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeLifecycleHooksRequest> request = null;
Response<DescribeLifecycleHooksResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeLifecycleHooksRequestMarshaller().marshall(describeLifecycleHooksRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeLifecycleHooksResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Updates the configuration for the specified Auto Scaling group.
*
*
* To update an Auto Scaling group with a launch configuration with
* InstanceMonitoring set to False, you must first disable the collection
* of group metrics. Otherwise, you will get an error. If you have
* previously enabled the collection of group metrics, you can disable it
* using DisableMetricsCollection.
*
*
* The new settings are registered upon the completion of this call. Any
* launch configuration settings take effect on any triggers after this
* call returns. Scaling activities that are currently in progress aren't
* affected.
*
*
* Note the following:
*
*
*
* - If you specify a new value for MinSize without specifying a value
* for DesiredCapacity, and the new MinSize is larger than the current
* size of the group, we implicitly call SetDesiredCapacity to set the
* size of the group to the new value of MinSize.
*
* - If you specify a new value for MaxSize without specifying a value
* for DesiredCapacity, and the new MaxSize is smaller than the current
* size of the group, we implicitly call SetDesiredCapacity to set the
* size of the group to the new value of MaxSize.
*
* - All other optional parameters are left unchanged if not specified.
*
*
* @param updateAutoScalingGroupRequest Container for the necessary
* parameters to execute the UpdateAutoScalingGroup service method on
* AmazonAutoScaling.
*
*
* @throws ScalingActivityInProgressException
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void updateAutoScalingGroup(UpdateAutoScalingGroupRequest updateAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(updateAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<UpdateAutoScalingGroupRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new UpdateAutoScalingGroupRequestMarshaller().marshall(updateAutoScalingGroupRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
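/*
 * Usage sketch for updateAutoScalingGroup, resizing an existing group, given a client
 * named autoScaling. The name and sizes are placeholders; optional parameters that
 * are not specified are left unchanged.
 *
 * autoScaling.updateAutoScalingGroup(new UpdateAutoScalingGroupRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withMinSize(2)
 *         .withMaxSize(8)
 *         .withDesiredCapacity(4));
 */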
/**
*
* Records a heartbeat for the lifecycle action associated with a
* specific token. This extends the timeout by the length of time defined
* by the HeartbeatTimeout parameter of PutLifecycleHook.
*
*
* This operation is a part of the basic sequence for adding a lifecycle
* hook to an Auto Scaling group:
*
* - Create a notification target. A target can be either an
* Amazon SQS queue or an Amazon SNS topic.
* - Create an IAM role. This role allows Auto Scaling to publish
* lifecycle notifications to the designated SQS queue or SNS topic.
* - Create the lifecycle hook. You can create a hook that acts when
* instances launch or when instances terminate.
* - If necessary, record the lifecycle action heartbeat to keep
* the instance in a pending state.
* - Complete the lifecycle action.
*
* For more information, see
* Auto Scaling Pending State and Auto Scaling Terminating State
* in the Auto Scaling Developer Guide .
*
*
* @param recordLifecycleActionHeartbeatRequest Container for the
* necessary parameters to execute the RecordLifecycleActionHeartbeat
* service method on AmazonAutoScaling.
*
* @return The response from the RecordLifecycleActionHeartbeat service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public RecordLifecycleActionHeartbeatResult recordLifecycleActionHeartbeat(RecordLifecycleActionHeartbeatRequest recordLifecycleActionHeartbeatRequest) {
ExecutionContext executionContext = createExecutionContext(recordLifecycleActionHeartbeatRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<RecordLifecycleActionHeartbeatRequest> request = null;
Response<RecordLifecycleActionHeartbeatResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new RecordLifecycleActionHeartbeatRequestMarshaller().marshall(recordLifecycleActionHeartbeatRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new RecordLifecycleActionHeartbeatResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the scaling process types for use with ResumeProcesses and
* SuspendProcesses.
*
*
* @param describeScalingProcessTypesRequest Container for the necessary
* parameters to execute the DescribeScalingProcessTypes service method
* on AmazonAutoScaling.
*
* @return The response from the DescribeScalingProcessTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeScalingProcessTypesResult describeScalingProcessTypes(DescribeScalingProcessTypesRequest describeScalingProcessTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeScalingProcessTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeScalingProcessTypesRequest> request = null;
Response<DescribeScalingProcessTypesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeScalingProcessTypesRequestMarshaller().marshall(describeScalingProcessTypesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeScalingProcessTypesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the termination policies supported by Auto Scaling.
*
*
* @param describeTerminationPolicyTypesRequest Container for the
* necessary parameters to execute the DescribeTerminationPolicyTypes
* service method on AmazonAutoScaling.
*
* @return The response from the DescribeTerminationPolicyTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes(DescribeTerminationPolicyTypesRequest describeTerminationPolicyTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeTerminationPolicyTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeTerminationPolicyTypesRequest> request = null;
Response<DescribeTerminationPolicyTypesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeTerminationPolicyTypesRequestMarshaller().marshall(describeTerminationPolicyTypesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeTerminationPolicyTypesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Completes the lifecycle action for the associated token initiated
* under the given lifecycle hook with the specified result.
*
*
* This operation is a part of the basic sequence for adding a lifecycle
* hook to an Auto Scaling group:
*
* - Create a notification target. A target can be either an
* Amazon SQS queue or an Amazon SNS topic.
* - Create an IAM role. This role allows Auto Scaling to publish
* lifecycle notifications to the designated SQS queue or SNS topic.
* - Create the lifecycle hook. You can create a hook that acts when
* instances launch or when instances terminate.
* - If necessary, record the lifecycle action heartbeat to keep the
* instance in a pending state.
* - Complete the lifecycle action .
*
* For more information, see
* Auto Scaling Pending State and Auto Scaling Terminating State
* in the Auto Scaling Developer Guide .
*
*
* @param completeLifecycleActionRequest Container for the necessary
* parameters to execute the CompleteLifecycleAction service method on
* AmazonAutoScaling.
*
* @return The response from the CompleteLifecycleAction service method,
* as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public CompleteLifecycleActionResult completeLifecycleAction(CompleteLifecycleActionRequest completeLifecycleActionRequest) {
ExecutionContext executionContext = createExecutionContext(completeLifecycleActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<CompleteLifecycleActionRequest> request = null;
Response<CompleteLifecycleActionResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new CompleteLifecycleActionRequestMarshaller().marshall(completeLifecycleActionRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new CompleteLifecycleActionResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
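/*
 * Usage sketch for completeLifecycleAction, continuing a pending termination, given a
 * client named autoScaling. The lifecycleActionToken would come from the lifecycle
 * notification; the other values are placeholders, and the withXxx setters are
 * assumed from the generated request builders.
 *
 * autoScaling.completeLifecycleAction(new CompleteLifecycleActionRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withLifecycleHookName("my-termination-hook")
 *         .withLifecycleActionToken(lifecycleActionToken)
 *         .withLifecycleActionResult("CONTINUE"));
 */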
/**
*
* Deletes the specified Auto Scaling group.
*
*
* If the group has instances or scaling activities in progress, you
* must specify the option to force the deletion in order for it to
* succeed.
*
*
* If the group has policies, deleting the group deletes the policies,
* the underlying alarm actions, and any alarm that no longer has an
* associated action.
*
*
* To remove instances from the Auto Scaling group before deleting it,
* call DetachInstances with the list of instances and the option to
* decrement the desired capacity so that Auto Scaling does not launch
* replacement instances.
*
*
* To terminate all instances before deleting the Auto Scaling group,
* call UpdateAutoScalingGroup and set the minimum size and desired
* capacity of the Auto Scaling group to zero.
*
*
* @param deleteAutoScalingGroupRequest Container for the necessary
* parameters to execute the DeleteAutoScalingGroup service method on
* AmazonAutoScaling.
*
*
* @throws ScalingActivityInProgressException
* @throws ResourceContentionException
* @throws ResourceInUseException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteAutoScalingGroup(DeleteAutoScalingGroupRequest deleteAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteAutoScalingGroupRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeleteAutoScalingGroupRequestMarshaller().marshall(deleteAutoScalingGroupRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
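/*
 * Usage sketch for deleteAutoScalingGroup with a forced delete, given a client named
 * autoScaling. The group name is a placeholder; forcing the delete also removes any
 * remaining instances and in-progress scaling activities.
 *
 * autoScaling.deleteAutoScalingGroup(new DeleteAutoScalingGroupRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withForceDelete(true));
 */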
/**
*
* Describes the load balancers for the specified Auto Scaling group.
*
*
* @param describeLoadBalancersRequest Container for the necessary
* parameters to execute the DescribeLoadBalancers service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeLoadBalancers service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeLoadBalancersResult describeLoadBalancers(DescribeLoadBalancersRequest describeLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(describeLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeLoadBalancersRequest> request = null;
Response<DescribeLoadBalancersResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeLoadBalancersRequestMarshaller().marshall(describeLoadBalancersRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeLoadBalancersResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Removes one or more load balancers from the specified Auto Scaling
* group.
*
*
* When you detach a load balancer, it enters the Removing
* state while deregistering the instances in the group. When all
* instances are deregistered, then you can no longer describe the load
* balancer using DescribeLoadBalancers. Note that the instances remain
* running.
*
*
* @param detachLoadBalancersRequest Container for the necessary
* parameters to execute the DetachLoadBalancers service method on
* AmazonAutoScaling.
*
* @return The response from the DetachLoadBalancers service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DetachLoadBalancersResult detachLoadBalancers(DetachLoadBalancersRequest detachLoadBalancersRequest) {
ExecutionContext executionContext = createExecutionContext(detachLoadBalancersRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DetachLoadBalancersRequest> request = null;
Response<DetachLoadBalancersResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DetachLoadBalancersRequestMarshaller().marshall(detachLoadBalancersRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DetachLoadBalancersResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates or updates a policy for an Auto Scaling group. To update an
* existing policy, use the existing policy name and set the parameters
* you want to change. Any existing parameter not changed in an update to
* an existing policy is not changed in this update request.
*
*
* If you exceed your maximum limit of step adjustments, which by
* default is 20 per region, the call fails. For information about
* updating this limit, see
* AWS Service Limits
* in the Amazon Web Services General Reference .
*
*
* @param putScalingPolicyRequest Container for the necessary parameters
* to execute the PutScalingPolicy service method on AmazonAutoScaling.
*
* @return The response from the PutScalingPolicy service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public PutScalingPolicyResult putScalingPolicy(PutScalingPolicyRequest putScalingPolicyRequest) {
ExecutionContext executionContext = createExecutionContext(putScalingPolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<PutScalingPolicyRequest> request = null;
Response<PutScalingPolicyResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new PutScalingPolicyRequestMarshaller().marshall(putScalingPolicyRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new PutScalingPolicyResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
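/*
 * Usage sketch for putScalingPolicy, adding a simple scale-out policy, given a client
 * named autoScaling. The names and values are placeholders; the returned policy ARN
 * can then be wired to a CloudWatch alarm action.
 *
 * PutScalingPolicyResult policyResult = autoScaling.putScalingPolicy(new PutScalingPolicyRequest()
 *         .withAutoScalingGroupName("my-asg")
 *         .withPolicyName("scale-out-on-high-cpu")
 *         .withAdjustmentType("ChangeInCapacity")
 *         .withScalingAdjustment(1)
 *         .withCooldown(300));
 * String policyArn = policyResult.getPolicyARN();
 */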
/**
*
* Describes one or more scaling activities for the specified Auto
* Scaling group. If you omit the ActivityIds, the call
* returns all activities from the past six weeks. Activities are sorted
* by the start time. Activities still in progress appear first on the
* list.
*
*
* @param describeScalingActivitiesRequest Container for the necessary
* parameters to execute the DescribeScalingActivities service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeScalingActivities service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeScalingActivitiesResult describeScalingActivities(DescribeScalingActivitiesRequest describeScalingActivitiesRequest) {
ExecutionContext executionContext = createExecutionContext(describeScalingActivitiesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeScalingActivitiesRequest> request = null;
Response<DescribeScalingActivitiesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeScalingActivitiesRequestMarshaller().marshall(describeScalingActivitiesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeScalingActivitiesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Moves the specified instances into Standby mode.
*
*
* For more information, see
* Auto Scaling InService State
* in the Auto Scaling Developer Guide .
*
*
* @param enterStandbyRequest Container for the necessary parameters to
* execute the EnterStandby service method on AmazonAutoScaling.
*
* @return The response from the EnterStandby service method, as returned
* by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public EnterStandbyResult enterStandby(EnterStandbyRequest enterStandbyRequest) {
ExecutionContext executionContext = createExecutionContext(enterStandbyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<EnterStandbyRequest> request = null;
Response<EnterStandbyResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new EnterStandbyRequestMarshaller().marshall(enterStandbyRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new EnterStandbyResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
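/*
 * Illustrative sketch: move a hypothetical instance into Standby and decrement
 * the group's desired capacity so it is not replaced. Accessors assumed from
 * the generated model classes.
 *
 *   EnterStandbyResult standby = autoScaling.enterStandby(new EnterStandbyRequest()
 *       .withAutoScalingGroupName("my-asg")
 *       .withInstanceIds("i-0123456789abcdef0")
 *       .withShouldDecrementDesiredCapacity(true));
 *   // standby.getActivities() tracks the progress of each instance.
 */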
/**
*
* Deletes the specified tags.
*
*
* @param deleteTagsRequest Container for the necessary parameters to
* execute the DeleteTags service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteTags(DeleteTagsRequest deleteTagsRequest) {
ExecutionContext executionContext = createExecutionContext(deleteTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteTagsRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeleteTagsRequestMarshaller().marshall(deleteTagsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Creates or updates tags for the specified Auto Scaling group.
*
*
* A tag is defined by its resource ID, resource type, key, value, and
* propagate flag. The value and the propagate flag are optional
* parameters. The only supported resource type is
* auto-scaling-group, and the resource ID must be the name
* of the group. The PropagateAtLaunch flag determines
* whether the tag is added to instances launched in the group. Valid
* values are true or false.
*
*
* When you specify a tag with a key that already exists, the operation
* overwrites the previous tag definition, and you do not get an error
* message.
*
*
* For more information, see
* Tagging Auto Scaling Groups and Instances
* in the Auto Scaling Developer Guide .
*
*
* @param createOrUpdateTagsRequest Container for the necessary
* parameters to execute the CreateOrUpdateTags service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws LimitExceededException
* @throws AlreadyExistsException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void createOrUpdateTags(CreateOrUpdateTagsRequest createOrUpdateTagsRequest) {
ExecutionContext executionContext = createExecutionContext(createOrUpdateTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<CreateOrUpdateTagsRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new CreateOrUpdateTagsRequestMarshaller().marshall(createOrUpdateTagsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
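/*
 * Illustrative sketch: tag a hypothetical group and propagate the tag to
 * instances launched into it. Tag members and accessors assumed from the
 * generated model classes.
 *
 *   autoScaling.createOrUpdateTags(new CreateOrUpdateTagsRequest().withTags(
 *       new Tag().withResourceId("my-asg")
 *                .withResourceType("auto-scaling-group")
 *                .withKey("environment")
 *                .withValue("test")
 *                .withPropagateAtLaunch(true)));
 */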
/**
*
* Describes the available CloudWatch metrics for Auto Scaling.
*
*
* Note that the GroupStandbyInstances metric is not
* returned by default. You must explicitly request this metric when
* calling EnableMetricsCollection.
*
*
* @param describeMetricCollectionTypesRequest Container for the
* necessary parameters to execute the DescribeMetricCollectionTypes
* service method on AmazonAutoScaling.
*
* @return The response from the DescribeMetricCollectionTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeMetricCollectionTypesResult describeMetricCollectionTypes(DescribeMetricCollectionTypesRequest describeMetricCollectionTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeMetricCollectionTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeMetricCollectionTypesRequest> request = null;
Response<DescribeMetricCollectionTypesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeMetricCollectionTypesRequestMarshaller().marshall(describeMetricCollectionTypesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeMetricCollectionTypesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
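/*
 * Illustrative sketch: print the metric names Auto Scaling can publish to
 * CloudWatch. Result getters assumed from the generated model classes.
 *
 *   DescribeMetricCollectionTypesResult types = autoScaling.describeMetricCollectionTypes();
 *   for (MetricCollectionType type : types.getMetrics()) {
 *       System.out.println(type.getMetric());
 *   }
 */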
/**
*
* Describes the actions scheduled for your Auto Scaling group that
* haven't run. To describe the actions that have already run, use
* DescribeScalingActivities.
*
*
* @param describeScheduledActionsRequest Container for the necessary
* parameters to execute the DescribeScheduledActions service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeScheduledActions service method,
* as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeScheduledActionsResult describeScheduledActions(DescribeScheduledActionsRequest describeScheduledActionsRequest) {
ExecutionContext executionContext = createExecutionContext(describeScheduledActionsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeScheduledActionsRequest> request = null;
Response<DescribeScheduledActionsResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeScheduledActionsRequestMarshaller().marshall(describeScheduledActionsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeScheduledActionsResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Disables monitoring of the specified metrics for the specified Auto
* Scaling group.
*
*
* @param disableMetricsCollectionRequest Container for the necessary
* parameters to execute the DisableMetricsCollection service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void disableMetricsCollection(DisableMetricsCollectionRequest disableMetricsCollectionRequest) {
ExecutionContext executionContext = createExecutionContext(disableMetricsCollectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DisableMetricsCollectionRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DisableMetricsCollectionRequestMarshaller().marshall(disableMetricsCollectionRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Sets the size of the specified Auto Scaling group.
*
*
* For more information about desired capacity, see
* What Is Auto Scaling?
* in the Auto Scaling Developer Guide .
*
*
* @param setDesiredCapacityRequest Container for the necessary
* parameters to execute the SetDesiredCapacity service method on
* AmazonAutoScaling.
*
*
* @throws ScalingActivityInProgressException
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void setDesiredCapacity(SetDesiredCapacityRequest setDesiredCapacityRequest) {
ExecutionContext executionContext = createExecutionContext(setDesiredCapacityRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<SetDesiredCapacityRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new SetDesiredCapacityRequestMarshaller().marshall(setDesiredCapacityRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
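/*
 * Illustrative sketch: manually set the desired capacity of a hypothetical
 * group while honoring any cooldown currently in effect. Accessors assumed
 * from the generated model classes.
 *
 *   autoScaling.setDesiredCapacity(new SetDesiredCapacityRequest()
 *       .withAutoScalingGroupName("my-asg")
 *       .withDesiredCapacity(4)
 *       .withHonorCooldown(true));
 */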
/**
*
* Deletes the specified launch configuration.
*
*
* The launch configuration must not be attached to an Auto Scaling
* group. When this call completes, the launch configuration is no longer
* available for use.
*
*
* @param deleteLaunchConfigurationRequest Container for the necessary
* parameters to execute the DeleteLaunchConfiguration service method on
* AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws ResourceInUseException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteLaunchConfiguration(DeleteLaunchConfigurationRequest deleteLaunchConfigurationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteLaunchConfigurationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteLaunchConfigurationRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeleteLaunchConfigurationRequestMarshaller().marshall(deleteLaunchConfigurationRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Terminates the specified instance and optionally adjusts the desired
* group size.
*
*
* This call simply makes a termination request. The instance is not
* terminated immediately.
*
*
* @param terminateInstanceInAutoScalingGroupRequest Container for the
* necessary parameters to execute the
* TerminateInstanceInAutoScalingGroup service method on
* AmazonAutoScaling.
*
* @return The response from the TerminateInstanceInAutoScalingGroup
* service method, as returned by AmazonAutoScaling.
*
* @throws ScalingActivityInProgressException
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public TerminateInstanceInAutoScalingGroupResult terminateInstanceInAutoScalingGroup(TerminateInstanceInAutoScalingGroupRequest terminateInstanceInAutoScalingGroupRequest) {
ExecutionContext executionContext = createExecutionContext(terminateInstanceInAutoScalingGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<TerminateInstanceInAutoScalingGroupRequest> request = null;
Response<TerminateInstanceInAutoScalingGroupResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new TerminateInstanceInAutoScalingGroupRequestMarshaller().marshall(terminateInstanceInAutoScalingGroupRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new TerminateInstanceInAutoScalingGroupResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
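/*
 * Illustrative sketch: queue the termination of a hypothetical instance and
 * shrink the desired capacity so it is not replaced; the returned Activity
 * tracks the asynchronous termination. Accessors assumed from the generated
 * model classes.
 *
 *   TerminateInstanceInAutoScalingGroupResult terminated =
 *       autoScaling.terminateInstanceInAutoScalingGroup(
 *           new TerminateInstanceInAutoScalingGroupRequest()
 *               .withInstanceId("i-0123456789abcdef0")
 *               .withShouldDecrementDesiredCapacity(true));
 *   Activity terminationActivity = terminated.getActivity();
 */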
/**
*
* Deletes the specified Auto Scaling policy.
*
*
* Deleting a policy deletes the underlying alarm action, but does not
* delete the alarm, even if it no longer has an associated action.
*
*
* @param deletePolicyRequest Container for the necessary parameters to
* execute the DeletePolicy service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deletePolicy(DeletePolicyRequest deletePolicyRequest) {
ExecutionContext executionContext = createExecutionContext(deletePolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeletePolicyRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeletePolicyRequestMarshaller().marshall(deletePolicyRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Sets the health status of the specified instance.
*
*
* For more information, see
* Health Checks
* in the Auto Scaling Developer Guide .
*
*
* @param setInstanceHealthRequest Container for the necessary parameters
* to execute the SetInstanceHealth service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void setInstanceHealth(SetInstanceHealthRequest setInstanceHealthRequest) {
ExecutionContext executionContext = createExecutionContext(setInstanceHealthRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<SetInstanceHealthRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new SetInstanceHealthRequestMarshaller().marshall(setInstanceHealthRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Executes the specified policy.
*
*
* @param executePolicyRequest Container for the necessary parameters to
* execute the ExecutePolicy service method on AmazonAutoScaling.
*
*
* @throws ScalingActivityInProgressException
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void executePolicy(ExecutePolicyRequest executePolicyRequest) {
ExecutionContext executionContext = createExecutionContext(executePolicyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<ExecutePolicyRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new ExecutePolicyRequestMarshaller().marshall(executePolicyRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Describes the notification types that are supported by Auto Scaling.
*
*
* @param describeAutoScalingNotificationTypesRequest Container for the
* necessary parameters to execute the
* DescribeAutoScalingNotificationTypes service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeAutoScalingNotificationTypes
* service method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes(DescribeAutoScalingNotificationTypesRequest describeAutoScalingNotificationTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAutoScalingNotificationTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeAutoScalingNotificationTypesRequest> request = null;
Response<DescribeAutoScalingNotificationTypesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeAutoScalingNotificationTypesRequestMarshaller().marshall(describeAutoScalingNotificationTypesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeAutoScalingNotificationTypesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Suspends the specified Auto Scaling processes for the specified Auto
* Scaling group. To suspend specific processes, use the
* ScalingProcesses parameter. To suspend all processes,
* omit the ScalingProcesses parameter.
*
*
* Note that if you suspend either the Launch or
* Terminate process types, it can prevent other process
* types from functioning properly.
*
*
* To resume processes that have been suspended, use ResumeProcesses.
*
*
* For more information, see
* Suspend and Resume Auto Scaling Processes
* in the Auto Scaling Developer Guide .
*
*
* @param suspendProcessesRequest Container for the necessary parameters
* to execute the SuspendProcesses service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws ResourceInUseException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void suspendProcesses(SuspendProcessesRequest suspendProcessesRequest) {
ExecutionContext executionContext = createExecutionContext(suspendProcessesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<SuspendProcessesRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new SuspendProcessesRequestMarshaller().marshall(suspendProcessesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
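/*
 * Illustrative sketch: suspend only the Launch and Terminate process types for
 * a hypothetical group; omitting withScalingProcesses would suspend all
 * process types. Accessors assumed from the generated model classes.
 *
 *   autoScaling.suspendProcesses(new SuspendProcessesRequest()
 *       .withAutoScalingGroupName("my-asg")
 *       .withScalingProcesses("Launch", "Terminate"));
 */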
/**
*
* Deletes the specified lifecycle hook.
*
*
* If there are any outstanding lifecycle actions, they are completed
* first (ABANDON for launching instances,
* CONTINUE for terminating instances).
*
*
* @param deleteLifecycleHookRequest Container for the necessary
* parameters to execute the DeleteLifecycleHook service method on
* AmazonAutoScaling.
*
* @return The response from the DeleteLifecycleHook service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DeleteLifecycleHookResult deleteLifecycleHook(DeleteLifecycleHookRequest deleteLifecycleHookRequest) {
ExecutionContext executionContext = createExecutionContext(deleteLifecycleHookRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteLifecycleHookRequest> request = null;
Response<DeleteLifecycleHookResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DeleteLifecycleHookRequestMarshaller().marshall(deleteLifecycleHookRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DeleteLifecycleHookResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Resumes the specified suspended Auto Scaling processes for the
* specified Auto Scaling group. To resume specific processes, use the
* ScalingProcesses parameter. To resume all processes, omit
* the ScalingProcesses parameter. For more information, see
* Suspend and Resume Auto Scaling Processes
* in the Auto Scaling Developer Guide .
*
*
* @param resumeProcessesRequest Container for the necessary parameters
* to execute the ResumeProcesses service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws ResourceInUseException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void resumeProcesses(ResumeProcessesRequest resumeProcessesRequest) {
ExecutionContext executionContext = createExecutionContext(resumeProcessesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<ResumeProcessesRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new ResumeProcessesRequestMarshaller().marshall(resumeProcessesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
/**
*
* Describes the policy adjustment types for use with PutScalingPolicy.
*
*
* @param describeAdjustmentTypesRequest Container for the necessary
* parameters to execute the DescribeAdjustmentTypes service method on
* AmazonAutoScaling.
*
* @return The response from the DescribeAdjustmentTypes service method,
* as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAdjustmentTypesResult describeAdjustmentTypes(DescribeAdjustmentTypesRequest describeAdjustmentTypesRequest) {
ExecutionContext executionContext = createExecutionContext(describeAdjustmentTypesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeAdjustmentTypesRequest> request = null;
Response<DescribeAdjustmentTypesResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeAdjustmentTypesRequestMarshaller().marshall(describeAdjustmentTypesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeAdjustmentTypesResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes the notification actions associated with the specified Auto
* Scaling group.
*
*
* @param describeNotificationConfigurationsRequest Container for the
* necessary parameters to execute the DescribeNotificationConfigurations
* service method on AmazonAutoScaling.
*
* @return The response from the DescribeNotificationConfigurations
* service method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeNotificationConfigurationsResult describeNotificationConfigurations(DescribeNotificationConfigurationsRequest describeNotificationConfigurationsRequest) {
ExecutionContext executionContext = createExecutionContext(describeNotificationConfigurationsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeNotificationConfigurationsRequest> request = null;
Response<DescribeNotificationConfigurationsResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeNotificationConfigurationsRequestMarshaller().marshall(describeNotificationConfigurationsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeNotificationConfigurationsResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Moves the specified instances out of Standby mode.
*
*
* For more information, see
* Auto Scaling InService State
* in the Auto Scaling Developer Guide .
*
*
* @param exitStandbyRequest Container for the necessary parameters to
* execute the ExitStandby service method on AmazonAutoScaling.
*
* @return The response from the ExitStandby service method, as returned
* by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public ExitStandbyResult exitStandby(ExitStandbyRequest exitStandbyRequest) {
ExecutionContext executionContext = createExecutionContext(exitStandbyRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<ExitStandbyRequest> request = null;
Response<ExitStandbyResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new ExitStandbyRequestMarshaller().marshall(exitStandbyRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new ExitStandbyResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes one or more launch configurations. If you omit the list of
* names, then the call describes all launch configurations.
*
*
* @param describeLaunchConfigurationsRequest Container for the necessary
* parameters to execute the DescribeLaunchConfigurations service method
* on AmazonAutoScaling.
*
* @return The response from the DescribeLaunchConfigurations service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeLaunchConfigurationsResult describeLaunchConfigurations(DescribeLaunchConfigurationsRequest describeLaunchConfigurationsRequest) {
ExecutionContext executionContext = createExecutionContext(describeLaunchConfigurationsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DescribeLaunchConfigurationsRequest> request = null;
Response<DescribeLaunchConfigurationsResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new DescribeLaunchConfigurationsRequestMarshaller().marshall(describeLaunchConfigurationsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new DescribeLaunchConfigurationsResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Updates the instance protection settings of the specified instances.
*
*
* For more information, see
* Instance Protection
* in the Auto Scaling Developer Guide .
*
*
* @param setInstanceProtectionRequest Container for the necessary
* parameters to execute the SetInstanceProtection service method on
* AmazonAutoScaling.
*
* @return The response from the SetInstanceProtection service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public SetInstanceProtectionResult setInstanceProtection(SetInstanceProtectionRequest setInstanceProtectionRequest) {
ExecutionContext executionContext = createExecutionContext(setInstanceProtectionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<SetInstanceProtectionRequest> request = null;
Response<SetInstanceProtectionResult> response = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new SetInstanceProtectionRequestMarshaller().marshall(setInstanceProtectionRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
response = invoke(request, new SetInstanceProtectionResultStaxUnmarshaller(), executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
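/*
 * Illustrative sketch: protect two hypothetical instances from termination
 * during scale-in. Accessors assumed from the generated model classes.
 *
 *   autoScaling.setInstanceProtection(new SetInstanceProtectionRequest()
 *       .withAutoScalingGroupName("my-asg")
 *       .withInstanceIds("i-0123456789abcdef0", "i-0123456789abcdef1")
 *       .withProtectedFromScaleIn(true));
 */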
/**
*
* Creates or updates a scheduled scaling action for an Auto Scaling
* group. When updating a scheduled scaling action, if you leave a
* parameter unspecified, the corresponding value remains unchanged in
* the affected Auto Scaling group.
*
*
* For more information, see
* Scheduled Scaling
* in the Auto Scaling Developer Guide .
*
*
* @param putScheduledUpdateGroupActionRequest Container for the
* necessary parameters to execute the PutScheduledUpdateGroupAction
* service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
* @throws LimitExceededException
* @throws AlreadyExistsException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void putScheduledUpdateGroupAction(PutScheduledUpdateGroupActionRequest putScheduledUpdateGroupActionRequest) {
ExecutionContext executionContext = createExecutionContext(putScheduledUpdateGroupActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<PutScheduledUpdateGroupActionRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new PutScheduledUpdateGroupActionRequestMarshaller().marshall(putScheduledUpdateGroupActionRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
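/*
 * Illustrative sketch: schedule a recurring weekday capacity change for a
 * hypothetical group; parameters left unset keep their current values.
 * Accessors assumed from the generated model classes.
 *
 *   autoScaling.putScheduledUpdateGroupAction(new PutScheduledUpdateGroupActionRequest()
 *       .withAutoScalingGroupName("my-asg")
 *       .withScheduledActionName("scale-up-weekday-mornings")
 *       .withRecurrence("0 7 * * 1-5")
 *       .withMinSize(2)
 *       .withMaxSize(10)
 *       .withDesiredCapacity(6));
 */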
/**
*
* Attaches one or more EC2 instances to the specified Auto Scaling
* group.
*
*
* When you attach instances, Auto Scaling increases the desired
* capacity of the group by the number of instances being attached. If
* the number of instances being attached plus the desired capacity of
* the group exceeds the maximum size of the group, the operation fails.
*
*
* For more information, see
* Attach EC2 Instances to Your Auto Scaling Group
* in the Auto Scaling Developer Guide .
*
*
* @param attachInstancesRequest Container for the necessary parameters
* to execute the AttachInstances service method on AmazonAutoScaling.
*
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public void attachInstances(AttachInstancesRequest attachInstancesRequest) {
ExecutionContext executionContext = createExecutionContext(attachInstancesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<AttachInstancesRequest> request = null;
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
try {
request = new AttachInstancesRequestMarshaller().marshall(attachInstancesRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
invoke(request, null, executionContext);
} finally {
endClientExecution(awsRequestMetrics, request, null);
}
}
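/*
 * Illustrative sketch: attach an existing hypothetical EC2 instance to a
 * group. The desired capacity grows by the number of attached instances, so
 * the call fails if that would exceed the group's maximum size. Accessors
 * assumed from the generated model classes.
 *
 *   autoScaling.attachInstances(new AttachInstancesRequest()
 *       .withAutoScalingGroupName("my-asg")
 *       .withInstanceIds("i-0123456789abcdef0"));
 */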
/**
*
* Describes the current Auto Scaling resource limits for your AWS
* account.
*
*
* For information about requesting an increase in these limits, see
* AWS Service Limits
* in the Amazon Web Services General Reference .
*
*
* @return The response from the DescribeAccountLimits service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAccountLimitsResult describeAccountLimits() throws AmazonServiceException, AmazonClientException {
return describeAccountLimits(new DescribeAccountLimitsRequest());
}
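/*
 * Illustrative sketch: read the per-account Auto Scaling limits. Result
 * getters assumed from the generated model classes.
 *
 *   DescribeAccountLimitsResult limits = autoScaling.describeAccountLimits();
 *   Integer maxGroups = limits.getMaxNumberOfAutoScalingGroups();
 *   Integer maxLaunchConfigurations = limits.getMaxNumberOfLaunchConfigurations();
 */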
/**
*
* Describes one or more Auto Scaling groups. If a list of names is not
* provided, the call describes all Auto Scaling groups.
*
*
* @return The response from the DescribeAutoScalingGroups service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAutoScalingGroupsResult describeAutoScalingGroups() throws AmazonServiceException, AmazonClientException {
return describeAutoScalingGroups(new DescribeAutoScalingGroupsRequest());
}
/**
*
* Describes the policies for the specified Auto Scaling group.
*
*
* @return The response from the DescribePolicies service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribePoliciesResult describePolicies() throws AmazonServiceException, AmazonClientException {
return describePolicies(new DescribePoliciesRequest());
}
/**
*
* Attaches one or more load balancers to the specified Auto Scaling
* group.
*
*
* To describe the load balancers for an Auto Scaling group, use
* DescribeLoadBalancers. To detach the load balancer from the Auto
* Scaling group, use DetachLoadBalancers.
*
*
* For more information, see
* Attach a Load Balancer to Your Auto Scaling Group
* in the Auto Scaling Developer Guide .
*
*
* @return The response from the AttachLoadBalancers service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public AttachLoadBalancersResult attachLoadBalancers() throws AmazonServiceException, AmazonClientException {
return attachLoadBalancers(new AttachLoadBalancersRequest());
}
/**
*
* Describes the specified tags.
*
*
* You can use filters to limit the results. For example, you can query
* for the tags for a specific Auto Scaling group. You can specify
* multiple values for a filter. A tag must match at least one of the
* specified values for it to be included in the results.
*
*
* You can also specify multiple filters. The result includes
* information for a particular tag only if it matches all the filters.
* If there's no match, no special message is returned.
*
*
* @return The response from the DescribeTags service method, as returned
* by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeTagsResult describeTags() throws AmazonServiceException, AmazonClientException {
return describeTags(new DescribeTagsRequest());
}
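/*
 * Illustrative sketch: list the tags of a hypothetical group using a filter;
 * a tag is returned only if it matches every filter and at least one value of
 * each filter. Filter and accessor names assumed from the generated model
 * classes.
 *
 *   DescribeTagsResult tagResult = autoScaling.describeTags(new DescribeTagsRequest()
 *       .withFilters(new Filter()
 *           .withName("auto-scaling-group")
 *           .withValues("my-asg")));
 *   for (TagDescription tag : tagResult.getTags()) {
 *       System.out.println(tag.getKey() + "=" + tag.getValue());
 *   }
 */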
/**
*
* Describes the available types of lifecycle hooks.
*
*
* @return The response from the DescribeLifecycleHookTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeLifecycleHookTypesResult describeLifecycleHookTypes() throws AmazonServiceException, AmazonClientException {
return describeLifecycleHookTypes(new DescribeLifecycleHookTypesRequest());
}
/**
*
* Describes one or more Auto Scaling instances. If a list is not
* provided, the call describes all instances.
*
*
* @return The response from the DescribeAutoScalingInstances service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAutoScalingInstancesResult describeAutoScalingInstances() throws AmazonServiceException, AmazonClientException {
return describeAutoScalingInstances(new DescribeAutoScalingInstancesRequest());
}
/**
*
* Describes the scaling process types for use with ResumeProcesses and
* SuspendProcesses.
*
*
* @return The response from the DescribeScalingProcessTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeScalingProcessTypesResult describeScalingProcessTypes() throws AmazonServiceException, AmazonClientException {
return describeScalingProcessTypes(new DescribeScalingProcessTypesRequest());
}
/**
*
* Describes the termination policies supported by Auto Scaling.
*
*
* @return The response from the DescribeTerminationPolicyTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes() throws AmazonServiceException, AmazonClientException {
return describeTerminationPolicyTypes(new DescribeTerminationPolicyTypesRequest());
}
/**
*
* Removes one or more load balancers from the specified Auto Scaling
* group.
*
*
* When you detach a load balancer, it enters the Removing
* state while deregistering the instances in the group. When all
* instances are deregistered, then you can no longer describe the load
* balancer using DescribeLoadBalancers. Note that the instances remain
* running.
*
*
* @return The response from the DetachLoadBalancers service method, as
* returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DetachLoadBalancersResult detachLoadBalancers() throws AmazonServiceException, AmazonClientException {
return detachLoadBalancers(new DetachLoadBalancersRequest());
}
/**
*
* Describes one or more scaling activities for the specified Auto
* Scaling group. If you omit the ActivityIds, the call
* returns all activities from the past six weeks. Activities are sorted
* by the start time. Activities still in progress appear first on the
* list.
*
*
* @return The response from the DescribeScalingActivities service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeScalingActivitiesResult describeScalingActivities() throws AmazonServiceException, AmazonClientException {
return describeScalingActivities(new DescribeScalingActivitiesRequest());
}
/**
*
* Describes the available CloudWatch metrics for Auto Scaling.
*
*
* Note that the GroupStandbyInstances metric is not
* returned by default. You must explicitly request this metric when
* calling EnableMetricsCollection.
*
*
* @return The response from the DescribeMetricCollectionTypes service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeMetricCollectionTypesResult describeMetricCollectionTypes() throws AmazonServiceException, AmazonClientException {
return describeMetricCollectionTypes(new DescribeMetricCollectionTypesRequest());
}
/**
*
* Describes the actions scheduled for your Auto Scaling group that
* haven't run. To describe the actions that have already run, use
* DescribeScalingActivities.
*
*
* @return The response from the DescribeScheduledActions service method,
* as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeScheduledActionsResult describeScheduledActions() throws AmazonServiceException, AmazonClientException {
return describeScheduledActions(new DescribeScheduledActionsRequest());
}
/**
*
* Describes the notification types that are supported by Auto Scaling.
*
*
* @return The response from the DescribeAutoScalingNotificationTypes
* service method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes() throws AmazonServiceException, AmazonClientException {
return describeAutoScalingNotificationTypes(new DescribeAutoScalingNotificationTypesRequest());
}
/**
*
* Describes the policy adjustment types for use with PutScalingPolicy.
*
*
* @return The response from the DescribeAdjustmentTypes service method,
* as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeAdjustmentTypesResult describeAdjustmentTypes() throws AmazonServiceException, AmazonClientException {
return describeAdjustmentTypes(new DescribeAdjustmentTypesRequest());
}
/**
*
* Describes the notification actions associated with the specified Auto
* Scaling group.
*
*
* @return The response from the DescribeNotificationConfigurations
* service method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeNotificationConfigurationsResult describeNotificationConfigurations() throws AmazonServiceException, AmazonClientException {
return describeNotificationConfigurations(new DescribeNotificationConfigurationsRequest());
}
/**
*
* Describes one or more launch configurations. If you omit the list of
* names, then the call describes all launch configurations.
*
*
* @return The response from the DescribeLaunchConfigurations service
* method, as returned by AmazonAutoScaling.
*
* @throws ResourceContentionException
* @throws InvalidNextTokenException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonAutoScaling indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeLaunchConfigurationsResult describeLaunchConfigurations() throws AmazonServiceException, AmazonClientException {
return describeLaunchConfigurations(new DescribeLaunchConfigurationsRequest());
}
/**
* Returns additional metadata for a previously executed successful request, typically used for
* debugging issues where a service isn't acting as expected. This data isn't considered part
* of the result data returned by an operation, so it's available through this separate,
* diagnostic interface.
*
* Response metadata is only cached for a limited period of time, so if you need to access
* this extra diagnostic information for an executed request, you should use this method
* to retrieve it as soon as possible after executing the request.
*
* @param request
* The originally executed request
*
* @return The response metadata for the specified request, or null if none
* is available.
*
* @deprecated ResponseMetadata cache can hold up to 50 requests and
* responses in memory and can cause memory issues. This method
* now always returns null.
*/
public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
return client.getResponseMetadataForRequest(request);
}
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request,
Unmarshaller<X, StaxUnmarshallerContext> unmarshaller,
ExecutionContext executionContext)
{
request.setEndpoint(endpoint);
request.setTimeOffset(timeOffset);
AmazonWebServiceRequest originalRequest = request.getOriginalRequest();
AWSCredentials credentials = awsCredentialsProvider.getCredentials();
if (originalRequest.getRequestCredentials() != null) {
credentials = originalRequest.getRequestCredentials();
}
executionContext.setCredentials(credentials);
StaxResponseHandler<X> responseHandler = new StaxResponseHandler<X>(unmarshaller);
DefaultErrorResponseHandler errorResponseHandler = new DefaultErrorResponseHandler(exceptionUnmarshallers);
return client.execute(request, responseHandler, errorResponseHandler, executionContext);
}
}