/*
 * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.autoscaling;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import java.util.Map.Entry;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.transform.*;
/**
 * Client for accessing Auto Scaling. All service calls made using this client
 * are blocking, and will not return until the service call completes.
 * 
 * Auto Scaling is designed to automatically launch or terminate EC2 instances
 * based on user-defined policies, schedules, and health checks. Use this
 * service in conjunction with the Amazon CloudWatch and Elastic Load Balancing
 * services.
 * 
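 * A minimal construction sketch. It assumes credentials are available through
 * one of the supported providers (see the constructors below); the region is
 * an arbitrary example, not a requirement of this client:
 * 
 * <pre>
 * AmazonAutoScalingClient autoScaling = new AmazonAutoScalingClient();
 * // Any supported region works here; US_WEST_2 is just an example.
 * autoScaling.setRegion(Region.getRegion(Regions.US_WEST_2));
 * DescribeAutoScalingGroupsResult groups = autoScaling.describeAutoScalingGroups();
 * </pre>
 * 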
 */
@ThreadSafe
public class AmazonAutoScalingClient extends AmazonWebServiceClient implements
        AmazonAutoScaling {
    /** Provider for AWS credentials. */
    private AWSCredentialsProvider awsCredentialsProvider;
    private static final Log log = LogFactory.getLog(AmazonAutoScaling.class);
    /** Default signing name for the service. */
    private static final String DEFAULT_SIGNING_NAME = "autoscaling";
    /** The region metadata service name for computing region endpoints. */
    private static final String DEFAULT_ENDPOINT_PREFIX = "autoscaling";
    /**
     * Client configuration factory providing ClientConfigurations tailored to
     * this client
     */
    protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();
    /**
     * List of exception unmarshallers for all modeled exceptions
     */
    protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers = new ArrayList<Unmarshaller<AmazonServiceException, Node>>();
    /**
     * Constructs a new client to invoke service methods on Auto Scaling. A
     * credentials provider chain will be used that searches for credentials in
     * this order:
     * 
     * - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
     * - Java System Properties - aws.accessKeyId and aws.secretKey
     * - Instance profile credentials delivered through the Amazon EC2
     *   metadata service
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
     * @see DefaultAWSCredentialsProviderChain
     */
    public AmazonAutoScalingClient() {
        this(new DefaultAWSCredentialsProviderChain(), configFactory
                .getConfig());
    }
    /**
     * Constructs a new client to invoke service methods on Auto Scaling. A
     * credentials provider chain will be used that searches for credentials in
     * this order:
     * 
     * - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
     * - Java System Properties - aws.accessKeyId and aws.secretKey
     * - Instance profile credentials delivered through the Amazon EC2
     *   metadata service
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
     * @param clientConfiguration
     *        The client configuration options controlling how this client
     *        connects to Auto Scaling (ex: proxy settings, retry counts, etc.).
     *
     * @see DefaultAWSCredentialsProviderChain
     */
    public AmazonAutoScalingClient(ClientConfiguration clientConfiguration) {
        this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
    }
    /**
     * Constructs a new client to invoke service methods on Auto Scaling using
     * the specified AWS account credentials.
     *
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
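     * For example, a minimal sketch using static credentials (the key values
     * shown are placeholders, not real credentials):
     * 
     * <pre>
     * // Placeholder credentials; substitute your own access key and secret key.
     * AWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY_ID",
     *         "SECRET_KEY");
     * AmazonAutoScaling autoScaling = new AmazonAutoScalingClient(credentials);
     * </pre>
     * 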
     * @param awsCredentials
     *        The AWS credentials (access key ID and secret key) to use when
     *        authenticating with AWS services.
     */
    public AmazonAutoScalingClient(AWSCredentials awsCredentials) {
        this(awsCredentials, configFactory.getConfig());
    }
    /**
     * Constructs a new client to invoke service methods on Auto Scaling using
     * the specified AWS account credentials and client configuration options.
     *
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
     * @param awsCredentials
     *        The AWS credentials (access key ID and secret key) to use when
     *        authenticating with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client
     *        connects to Auto Scaling (ex: proxy settings, retry counts, etc.).
     */
    public AmazonAutoScalingClient(AWSCredentials awsCredentials,
            ClientConfiguration clientConfiguration) {
        super(clientConfiguration);
        this.awsCredentialsProvider = new StaticCredentialsProvider(
                awsCredentials);
        init();
    }
    /**
     * Constructs a new client to invoke service methods on Auto Scaling using
     * the specified AWS account credentials provider.
     *
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to
     *        authenticate requests with AWS services.
     */
    public AmazonAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider) {
        this(awsCredentialsProvider, configFactory.getConfig());
    }
    /**
     * Constructs a new client to invoke service methods on Auto Scaling using
     * the specified AWS account credentials provider and client configuration
     * options.
     *
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to
     *        authenticate requests with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client
     *        connects to Auto Scaling (ex: proxy settings, retry counts, etc.).
     */
    public AmazonAutoScalingClient(
            AWSCredentialsProvider awsCredentialsProvider,
            ClientConfiguration clientConfiguration) {
        this(awsCredentialsProvider, clientConfiguration, null);
    }
    /**
     * Constructs a new client to invoke service methods on Auto Scaling using
     * the specified AWS account credentials provider, client configuration
     * options, and request metric collector.
     *
     * 
     * All service calls made using this new client object are blocking, and
     * will not return until the service call completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to
     *        authenticate requests with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client
     *        connects to Auto Scaling (ex: proxy settings, retry counts, etc.).
     * @param requestMetricCollector
     *        optional request metric collector
     */
    public AmazonAutoScalingClient(
            AWSCredentialsProvider awsCredentialsProvider,
            ClientConfiguration clientConfiguration,
            RequestMetricCollector requestMetricCollector) {
        super(clientConfiguration, requestMetricCollector);
        this.awsCredentialsProvider = awsCredentialsProvider;
        init();
    }
    private void init() {
        exceptionUnmarshallers.add(new ResourceInUseExceptionUnmarshaller());
        exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
        exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller());
        exceptionUnmarshallers.add(new InvalidNextTokenExceptionUnmarshaller());
        exceptionUnmarshallers
                .add(new ResourceContentionExceptionUnmarshaller());
        exceptionUnmarshallers
                .add(new ScalingActivityInProgressExceptionUnmarshaller());
        exceptionUnmarshallers.add(new StandardErrorUnmarshaller());
        setServiceNameIntern(DEFAULT_SIGNING_NAME);
        setEndpointPrefix(DEFAULT_ENDPOINT_PREFIX);
        // calling this.setEndPoint(...) will also modify the signer accordingly
        this.setEndpoint("https://autoscaling.amazonaws.com");
        HandlerChainFactory chainFactory = new HandlerChainFactory();
        requestHandler2s
                .addAll(chainFactory
                        .newRequestHandlerChain("/com/amazonaws/services/autoscaling/request.handlers"));
        requestHandler2s
                .addAll(chainFactory
                        .newRequestHandler2Chain("/com/amazonaws/services/autoscaling/request.handler2s"));
    }
    /**
     * 
     * Attaches one or more EC2 instances to the specified Auto Scaling group.
     * 
     * 
     * When you attach instances, Auto Scaling increases the desired capacity of
     * the group by the number of instances being attached. If the number of
     * instances being attached plus the desired capacity of the group exceeds
     * the maximum size of the group, the operation fails.
     * 
     * 
     * For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Auto
     * Scaling Developer Guide.
     * 
     * 
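     * A minimal usage sketch, assuming an AmazonAutoScaling client named
     * autoScaling already exists; the group name and instance ID are
     * hypothetical placeholders:
     * 
     * <pre>
     * // "my-asg" and the instance ID are placeholders.
     * autoScaling.attachInstances(new AttachInstancesRequest()
     *         .withAutoScalingGroupName("my-asg")
     *         .withInstanceIds("i-1234567890abcdef0"));
     * </pre>
     * 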
     * @param attachInstancesRequest
     * @return Result of the AttachInstances operation returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.AttachInstances
     */
    @Override
    public AttachInstancesResult attachInstances(
            AttachInstancesRequest attachInstancesRequest) {
        ExecutionContext executionContext = createExecutionContext(attachInstancesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<AttachInstancesRequest> request = null;
        Response<AttachInstancesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new AttachInstancesRequestMarshaller().marshall(super
                        .beforeMarshalling(attachInstancesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<AttachInstancesResult> responseHandler = new StaxResponseHandler<AttachInstancesResult>(
                    new AttachInstancesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Attaches one or more load balancers to the specified Auto Scaling group.
     * 
     * 
     * To describe the load balancers for an Auto Scaling group, use
     * DescribeLoadBalancers. To detach the load balancer from the Auto
     * Scaling group, use DetachLoadBalancers.
     * 
     * 
     * For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto
     * Scaling Developer Guide.
     * 
     * 
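     * A minimal usage sketch, assuming an existing AmazonAutoScaling client;
     * the group and load balancer names are hypothetical placeholders:
     * 
     * <pre>
     * // Both names below are placeholders.
     * autoScaling.attachLoadBalancers(new AttachLoadBalancersRequest()
     *         .withAutoScalingGroupName("my-asg")
     *         .withLoadBalancerNames("my-load-balancer"));
     * </pre>
     * 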
     * @param attachLoadBalancersRequest
     * @return Result of the AttachLoadBalancers operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.AttachLoadBalancers
     */
    @Override
    public AttachLoadBalancersResult attachLoadBalancers(
            AttachLoadBalancersRequest attachLoadBalancersRequest) {
        ExecutionContext executionContext = createExecutionContext(attachLoadBalancersRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<AttachLoadBalancersRequest> request = null;
        Response<AttachLoadBalancersResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new AttachLoadBalancersRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(attachLoadBalancersRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<AttachLoadBalancersResult> responseHandler = new StaxResponseHandler<AttachLoadBalancersResult>(
                    new AttachLoadBalancersResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public AttachLoadBalancersResult attachLoadBalancers() {
        return attachLoadBalancers(new AttachLoadBalancersRequest());
    }
    /**
     * 
     * Completes the lifecycle action for the specified token or instance with
     * the specified result.
     * 
     * 
     * This step is a part of the procedure for adding a lifecycle hook to an
     * Auto Scaling group:
     * 
     * 
     * - (Optional) Create a Lambda function and a rule that allows CloudWatch
     *   Events to invoke your Lambda function when Auto Scaling launches or
     *   terminates instances.
     * - (Optional) Create a notification target and an IAM role. The target
     *   can be either an Amazon SQS queue or an Amazon SNS topic. The role
     *   allows Auto Scaling to publish lifecycle notifications to the target.
     * - Create the lifecycle hook. Specify whether the hook is used when the
     *   instances launch or terminate.
     * - If you need more time, record the lifecycle action heartbeat to keep
     *   the instance in a pending state.
     * - If you finish before the timeout period ends, complete the
     *   lifecycle action.
     * 
     * For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
     * 
     * 
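     * A minimal usage sketch, assuming an existing AmazonAutoScaling client;
     * the group name, hook name, and action token are hypothetical placeholders
     * (the token normally comes from the lifecycle notification):
     * 
     * <pre>
     * // All string values below are placeholders.
     * autoScaling.completeLifecycleAction(new CompleteLifecycleActionRequest()
     *         .withAutoScalingGroupName("my-asg")
     *         .withLifecycleHookName("my-launch-hook")
     *         .withLifecycleActionToken("token-from-notification")
     *         .withLifecycleActionResult("CONTINUE"));
     * </pre>
     * 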
     * @param completeLifecycleActionRequest
     * @return Result of the CompleteLifecycleAction operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.CompleteLifecycleAction
     */
    @Override
    public CompleteLifecycleActionResult completeLifecycleAction(
            CompleteLifecycleActionRequest completeLifecycleActionRequest) {
        ExecutionContext executionContext = createExecutionContext(completeLifecycleActionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CompleteLifecycleActionRequest> request = null;
        Response<CompleteLifecycleActionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CompleteLifecycleActionRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(completeLifecycleActionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<CompleteLifecycleActionResult> responseHandler = new StaxResponseHandler<CompleteLifecycleActionResult>(
                    new CompleteLifecycleActionResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Creates an Auto Scaling group with the specified name and attributes.
     * 
     * 
     * If you exceed your maximum limit of Auto Scaling groups, which by default
     * is 20 per region, the call fails. For information about viewing and
     * updating this limit, see DescribeAccountLimits.
     * 
     * 
     * For more information, see Auto Scaling Groups in the Auto Scaling Developer Guide.
     * 
     * 
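     * A minimal usage sketch, assuming an existing AmazonAutoScaling client and
     * an existing launch configuration; the names, zone, and sizes are
     * hypothetical:
     * 
     * <pre>
     * // Names, Availability Zone, and sizes below are placeholders.
     * autoScaling.createAutoScalingGroup(new CreateAutoScalingGroupRequest()
     *         .withAutoScalingGroupName("my-asg")
     *         .withLaunchConfigurationName("my-launch-config")
     *         .withAvailabilityZones("us-east-1a")
     *         .withMinSize(1)
     *         .withMaxSize(3)
     *         .withDesiredCapacity(2));
     * </pre>
     * 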
     * @param createAutoScalingGroupRequest
     * @return Result of the CreateAutoScalingGroup operation returned by the
     *         service.
     * @throws AlreadyExistsException
     *         You already have an Auto Scaling group or launch configuration
     *         with this name.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.CreateAutoScalingGroup
     */
    @Override
    public CreateAutoScalingGroupResult createAutoScalingGroup(
            CreateAutoScalingGroupRequest createAutoScalingGroupRequest) {
        ExecutionContext executionContext = createExecutionContext(createAutoScalingGroupRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateAutoScalingGroupRequest> request = null;
        Response<CreateAutoScalingGroupResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateAutoScalingGroupRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(createAutoScalingGroupRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<CreateAutoScalingGroupResult> responseHandler = new StaxResponseHandler<CreateAutoScalingGroupResult>(
                    new CreateAutoScalingGroupResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Creates a launch configuration.
     * 
     * 
     * If you exceed your maximum limit of launch configurations, which by
     * default is 100 per region, the call fails. For information about viewing
     * and updating this limit, see DescribeAccountLimits.
     * 
     * 
     * For more information, see Launch Configurations in the Auto Scaling Developer Guide.
     * 
     * 
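     * A minimal usage sketch, assuming an existing AmazonAutoScaling client;
     * the configuration name, AMI ID, and instance type are hypothetical:
     * 
     * <pre>
     * // The name, AMI ID, and instance type below are placeholders.
     * autoScaling.createLaunchConfiguration(new CreateLaunchConfigurationRequest()
     *         .withLaunchConfigurationName("my-launch-config")
     *         .withImageId("ami-12345678")
     *         .withInstanceType("t2.micro"));
     * </pre>
     * 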
     * @param createLaunchConfigurationRequest
     * @return Result of the CreateLaunchConfiguration operation returned by the
     *         service.
     * @throws AlreadyExistsException
     *         You already have an Auto Scaling group or launch configuration
     *         with this name.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.CreateLaunchConfiguration
     */
    @Override
    public CreateLaunchConfigurationResult createLaunchConfiguration(
            CreateLaunchConfigurationRequest createLaunchConfigurationRequest) {
        ExecutionContext executionContext = createExecutionContext(createLaunchConfigurationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateLaunchConfigurationRequest> request = null;
        Response<CreateLaunchConfigurationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateLaunchConfigurationRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(createLaunchConfigurationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<CreateLaunchConfigurationResult> responseHandler = new StaxResponseHandler<CreateLaunchConfigurationResult>(
                    new CreateLaunchConfigurationResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Creates or updates tags for the specified Auto Scaling group.
     * 
     * 
     * When you specify a tag with a key that already exists, the operation
     * overwrites the previous tag definition, and you do not get an error
     * message.
     * 
     * 
     * For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling
     * Developer Guide.
     * 
     * 
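     * A minimal usage sketch, assuming an existing AmazonAutoScaling client;
     * the group name and tag values are hypothetical:
     * 
     * <pre>
     * // The group name, key, and value below are placeholders.
     * autoScaling.createOrUpdateTags(new CreateOrUpdateTagsRequest()
     *         .withTags(new Tag()
     *                 .withResourceId("my-asg")
     *                 .withResourceType("auto-scaling-group")
     *                 .withKey("environment")
     *                 .withValue("test")
     *                 .withPropagateAtLaunch(true)));
     * </pre>
     * 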
     * @param createOrUpdateTagsRequest
     * @return Result of the CreateOrUpdateTags operation returned by the
     *         service.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws AlreadyExistsException
     *         You already have an Auto Scaling group or launch configuration
     *         with this name.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.CreateOrUpdateTags
     */
    @Override
    public CreateOrUpdateTagsResult createOrUpdateTags(
            CreateOrUpdateTagsRequest createOrUpdateTagsRequest) {
        ExecutionContext executionContext = createExecutionContext(createOrUpdateTagsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateOrUpdateTagsRequest> request = null;
        Response<CreateOrUpdateTagsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateOrUpdateTagsRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(createOrUpdateTagsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<CreateOrUpdateTagsResult> responseHandler = new StaxResponseHandler<CreateOrUpdateTagsResult>(
                    new CreateOrUpdateTagsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified Auto Scaling group.
     * 
     * 
     * If the group has instances or scaling activities in progress, you must
     * specify the option to force the deletion in order for it to succeed.
     * 
     * 
     * If the group has policies, deleting the group deletes the policies, the
     * underlying alarm actions, and any alarm that no longer has an associated
     * action.
     * 
     * 
     * To remove instances from the Auto Scaling group before deleting it, call
     * DetachInstances with the list of instances and the option to
     * decrement the desired capacity so that Auto Scaling does not launch
     * replacement instances.
     * 
     * 
     * To terminate all instances before deleting the Auto Scaling group, call
     * UpdateAutoScalingGroup and set the minimum size and desired
     * capacity of the Auto Scaling group to zero.
     * 
     * 
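     * A minimal usage sketch, assuming an existing AmazonAutoScaling client;
     * the group name is a hypothetical placeholder:
     * 
     * <pre>
     * // "my-asg" is a placeholder; forcing the delete also removes instances.
     * autoScaling.deleteAutoScalingGroup(new DeleteAutoScalingGroupRequest()
     *         .withAutoScalingGroupName("my-asg")
     *         .withForceDelete(true));
     * </pre>
     * 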
     * @param deleteAutoScalingGroupRequest
     * @return Result of the DeleteAutoScalingGroup operation returned by the
     *         service.
     * @throws ScalingActivityInProgressException
     *         The Auto Scaling group can't be deleted because there are scaling
     *         activities in progress.
     * @throws ResourceInUseException
     *         The Auto Scaling group or launch configuration can't be deleted
     *         because it is in use.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeleteAutoScalingGroup
     */
    @Override
    public DeleteAutoScalingGroupResult deleteAutoScalingGroup(
            DeleteAutoScalingGroupRequest deleteAutoScalingGroupRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteAutoScalingGroupRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteAutoScalingGroupRequest> request = null;
        Response<DeleteAutoScalingGroupResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteAutoScalingGroupRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(deleteAutoScalingGroupRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeleteAutoScalingGroupResult> responseHandler = new StaxResponseHandler<DeleteAutoScalingGroupResult>(
                    new DeleteAutoScalingGroupResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified launch configuration.
     * 
     * 
     * The launch configuration must not be attached to an Auto Scaling group.
     * When this call completes, the launch configuration is no longer available
     * for use.
     * 
     * 
     * @param deleteLaunchConfigurationRequest
     * @return Result of the DeleteLaunchConfiguration operation returned by the
     *         service.
     * @throws ResourceInUseException
     *         The Auto Scaling group or launch configuration can't be deleted
     *         because it is in use.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeleteLaunchConfiguration
     */
    @Override
    public DeleteLaunchConfigurationResult deleteLaunchConfiguration(
            DeleteLaunchConfigurationRequest deleteLaunchConfigurationRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteLaunchConfigurationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteLaunchConfigurationRequest> request = null;
        Response<DeleteLaunchConfigurationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteLaunchConfigurationRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(deleteLaunchConfigurationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeleteLaunchConfigurationResult> responseHandler = new StaxResponseHandler<DeleteLaunchConfigurationResult>(
                    new DeleteLaunchConfigurationResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified lifecycle hook.
     * 
     * 
     * If there are any outstanding lifecycle actions, they are completed first
     * (ABANDON for launching instances, CONTINUE for
     * terminating instances).
     * 
     * 
     * @param deleteLifecycleHookRequest
     * @return Result of the DeleteLifecycleHook operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeleteLifecycleHook
     */
    @Override
    public DeleteLifecycleHookResult deleteLifecycleHook(
            DeleteLifecycleHookRequest deleteLifecycleHookRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteLifecycleHookRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteLifecycleHookRequest> request = null;
        Response<DeleteLifecycleHookResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteLifecycleHookRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(deleteLifecycleHookRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeleteLifecycleHookResult> responseHandler = new StaxResponseHandler<DeleteLifecycleHookResult>(
                    new DeleteLifecycleHookResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified notification.
     * 
     * 
     * @param deleteNotificationConfigurationRequest
     * @return Result of the DeleteNotificationConfiguration operation returned
     *         by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeleteNotificationConfiguration
     */
    @Override
    public DeleteNotificationConfigurationResult deleteNotificationConfiguration(
            DeleteNotificationConfigurationRequest deleteNotificationConfigurationRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteNotificationConfigurationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteNotificationConfigurationRequest> request = null;
        Response<DeleteNotificationConfigurationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteNotificationConfigurationRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(deleteNotificationConfigurationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeleteNotificationConfigurationResult> responseHandler = new StaxResponseHandler<DeleteNotificationConfigurationResult>(
                    new DeleteNotificationConfigurationResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified Auto Scaling policy.
     * 
     * 
     * Deleting a policy deletes the underlying alarm action, but does not
     * delete the alarm, even if it no longer has an associated action.
     * 
     * 
     * @param deletePolicyRequest
     * @return Result of the DeletePolicy operation returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeletePolicy
     */
    @Override
    public DeletePolicyResult deletePolicy(
            DeletePolicyRequest deletePolicyRequest) {
        ExecutionContext executionContext = createExecutionContext(deletePolicyRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeletePolicyRequest> request = null;
        Response<DeletePolicyResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeletePolicyRequestMarshaller().marshall(super
                        .beforeMarshalling(deletePolicyRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeletePolicyResult> responseHandler = new StaxResponseHandler<DeletePolicyResult>(
                    new DeletePolicyResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified scheduled action.
     * 
     * 
     * @param deleteScheduledActionRequest
     * @return Result of the DeleteScheduledAction operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeleteScheduledAction
     */
    @Override
    public DeleteScheduledActionResult deleteScheduledAction(
            DeleteScheduledActionRequest deleteScheduledActionRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteScheduledActionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteScheduledActionRequest> request = null;
        Response<DeleteScheduledActionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteScheduledActionRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(deleteScheduledActionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeleteScheduledActionResult> responseHandler = new StaxResponseHandler<DeleteScheduledActionResult>(
                    new DeleteScheduledActionResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Deletes the specified tags.
     * 
     * 
     * @param deleteTagsRequest
     * @return Result of the DeleteTags operation returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DeleteTags
     */
    @Override
    public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteTagsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteTagsRequest> request = null;
        Response<DeleteTagsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteTagsRequestMarshaller().marshall(super
                        .beforeMarshalling(deleteTagsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DeleteTagsResult> responseHandler = new StaxResponseHandler<DeleteTagsResult>(
                    new DeleteTagsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Describes the current Auto Scaling resource limits for your AWS account.
     * 
     * 
     * For information about requesting an increase in these limits, see AWS Service Limits in the Amazon Web Services General
     * Reference.
     * 
     * 
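     * For example, a caller might read the current limits before creating new
     * groups or launch configurations (sketch assumes an existing
     * AmazonAutoScaling client):
     * 
     * <pre>
     * DescribeAccountLimitsResult limits = autoScaling.describeAccountLimits();
     * int maxGroups = limits.getMaxNumberOfAutoScalingGroups();
     * int maxLaunchConfigs = limits.getMaxNumberOfLaunchConfigurations();
     * </pre>
     * 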
     * @param describeAccountLimitsRequest
     * @return Result of the DescribeAccountLimits operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeAccountLimits
     */
    @Override
    public DescribeAccountLimitsResult describeAccountLimits(
            DescribeAccountLimitsRequest describeAccountLimitsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeAccountLimitsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeAccountLimitsRequest> request = null;
        Response<DescribeAccountLimitsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeAccountLimitsRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeAccountLimitsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeAccountLimitsResult> responseHandler = new StaxResponseHandler<DescribeAccountLimitsResult>(
                    new DescribeAccountLimitsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeAccountLimitsResult describeAccountLimits() {
        return describeAccountLimits(new DescribeAccountLimitsRequest());
    }
    /**
     * 
     * Describes the policy adjustment types for use with
     * PutScalingPolicy.
     * 
     * 
     * @param describeAdjustmentTypesRequest
     * @return Result of the DescribeAdjustmentTypes operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeAdjustmentTypes
     */
    @Override
    public DescribeAdjustmentTypesResult describeAdjustmentTypes(
            DescribeAdjustmentTypesRequest describeAdjustmentTypesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeAdjustmentTypesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeAdjustmentTypesRequest> request = null;
        Response<DescribeAdjustmentTypesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeAdjustmentTypesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeAdjustmentTypesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeAdjustmentTypesResult> responseHandler = new StaxResponseHandler<DescribeAdjustmentTypesResult>(
                    new DescribeAdjustmentTypesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeAdjustmentTypesResult describeAdjustmentTypes() {
        return describeAdjustmentTypes(new DescribeAdjustmentTypesRequest());
    }
    /**
     * 
     * Describes one or more Auto Scaling groups. If a list of names is not
     * provided, the call describes all Auto Scaling groups.
     * 
     * 
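     * A paging sketch, assuming an existing AmazonAutoScaling client; the group
     * name is a hypothetical placeholder:
     * 
     * <pre>
     * // "my-asg" is a placeholder group name.
     * DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest()
     *         .withAutoScalingGroupNames("my-asg");
     * DescribeAutoScalingGroupsResult result;
     * do {
     *     result = autoScaling.describeAutoScalingGroups(request);
     *     for (AutoScalingGroup group : result.getAutoScalingGroups()) {
     *         System.out.println(group.getAutoScalingGroupName());
     *     }
     *     request.setNextToken(result.getNextToken());
     * } while (result.getNextToken() != null);
     * </pre>
     * 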
     * @param describeAutoScalingGroupsRequest
     * @return Result of the DescribeAutoScalingGroups operation returned by the
     *         service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeAutoScalingGroups
     */
    @Override
    public DescribeAutoScalingGroupsResult describeAutoScalingGroups(
            DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeAutoScalingGroupsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeAutoScalingGroupsRequest> request = null;
        Response<DescribeAutoScalingGroupsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeAutoScalingGroupsRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeAutoScalingGroupsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeAutoScalingGroupsResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingGroupsResult>(
                    new DescribeAutoScalingGroupsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeAutoScalingGroupsResult describeAutoScalingGroups() {
        return describeAutoScalingGroups(new DescribeAutoScalingGroupsRequest());
    }
    /**
     * 
     * Describes one or more Auto Scaling instances. If a list is not provided,
     * the call describes all instances.
     * 
     * 
     * @param describeAutoScalingInstancesRequest
     * @return Result of the DescribeAutoScalingInstances operation returned by
     *         the service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeAutoScalingInstances
     */
    @Override
    public DescribeAutoScalingInstancesResult describeAutoScalingInstances(
            DescribeAutoScalingInstancesRequest describeAutoScalingInstancesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeAutoScalingInstancesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeAutoScalingInstancesRequest> request = null;
        Response<DescribeAutoScalingInstancesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeAutoScalingInstancesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeAutoScalingInstancesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeAutoScalingInstancesResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingInstancesResult>(
                    new DescribeAutoScalingInstancesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeAutoScalingInstancesResult describeAutoScalingInstances() {
        return describeAutoScalingInstances(new DescribeAutoScalingInstancesRequest());
    }
    /**
     * 
     * Describes the notification types that are supported by Auto Scaling.
     * 
     * 
     * @param describeAutoScalingNotificationTypesRequest
     * @return Result of the DescribeAutoScalingNotificationTypes operation
     *         returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeAutoScalingNotificationTypes
     */
    @Override
    public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes(
            DescribeAutoScalingNotificationTypesRequest describeAutoScalingNotificationTypesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeAutoScalingNotificationTypesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeAutoScalingNotificationTypesRequest> request = null;
        Response<DescribeAutoScalingNotificationTypesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeAutoScalingNotificationTypesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeAutoScalingNotificationTypesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeAutoScalingNotificationTypesResult> responseHandler = new StaxResponseHandler<DescribeAutoScalingNotificationTypesResult>(
                    new DescribeAutoScalingNotificationTypesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeAutoScalingNotificationTypesResult describeAutoScalingNotificationTypes() {
        return describeAutoScalingNotificationTypes(new DescribeAutoScalingNotificationTypesRequest());
    }
    /**
     * 
     * Describes one or more launch configurations. If you omit the list of
     * names, then the call describes all launch configurations.
     * 
     * 
     * @param describeLaunchConfigurationsRequest
     * @return Result of the DescribeLaunchConfigurations operation returned by
     *         the service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeLaunchConfigurations
     */
    @Override
    public DescribeLaunchConfigurationsResult describeLaunchConfigurations(
            DescribeLaunchConfigurationsRequest describeLaunchConfigurationsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeLaunchConfigurationsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeLaunchConfigurationsRequest> request = null;
        Response<DescribeLaunchConfigurationsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeLaunchConfigurationsRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeLaunchConfigurationsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeLaunchConfigurationsResult> responseHandler = new StaxResponseHandler<DescribeLaunchConfigurationsResult>(
                    new DescribeLaunchConfigurationsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeLaunchConfigurationsResult describeLaunchConfigurations() {
        return describeLaunchConfigurations(new DescribeLaunchConfigurationsRequest());
    }
    /**
     * 
     * Describes the available types of lifecycle hooks.
     * 
     * 
     * @param describeLifecycleHookTypesRequest
     * @return Result of the DescribeLifecycleHookTypes operation returned by
     *         the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeLifecycleHookTypes
     */
    @Override
    public DescribeLifecycleHookTypesResult describeLifecycleHookTypes(
            DescribeLifecycleHookTypesRequest describeLifecycleHookTypesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeLifecycleHookTypesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeLifecycleHookTypesRequest> request = null;
        Response<DescribeLifecycleHookTypesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeLifecycleHookTypesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeLifecycleHookTypesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeLifecycleHookTypesResult> responseHandler = new StaxResponseHandler<DescribeLifecycleHookTypesResult>(
                    new DescribeLifecycleHookTypesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeLifecycleHookTypesResult describeLifecycleHookTypes() {
        return describeLifecycleHookTypes(new DescribeLifecycleHookTypesRequest());
    }
    /**
     * 
     * Describes the lifecycle hooks for the specified Auto Scaling group.
     * 
     * 
     * @param describeLifecycleHooksRequest
     * @return Result of the DescribeLifecycleHooks operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeLifecycleHooks
     */
    @Override
    public DescribeLifecycleHooksResult describeLifecycleHooks(
            DescribeLifecycleHooksRequest describeLifecycleHooksRequest) {
        ExecutionContext executionContext = createExecutionContext(describeLifecycleHooksRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeLifecycleHooksRequest> request = null;
        Response<DescribeLifecycleHooksResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeLifecycleHooksRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeLifecycleHooksRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeLifecycleHooksResult> responseHandler = new StaxResponseHandler<DescribeLifecycleHooksResult>(
                    new DescribeLifecycleHooksResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Describes the load balancers for the specified Auto Scaling group.
     * 
     * 
     * @param describeLoadBalancersRequest
     * @return Result of the DescribeLoadBalancers operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeLoadBalancers
     */
    @Override
    public DescribeLoadBalancersResult describeLoadBalancers(
            DescribeLoadBalancersRequest describeLoadBalancersRequest) {
        ExecutionContext executionContext = createExecutionContext(describeLoadBalancersRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeLoadBalancersRequest> request = null;
        Response<DescribeLoadBalancersResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeLoadBalancersRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeLoadBalancersRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeLoadBalancersResult> responseHandler = new StaxResponseHandler<DescribeLoadBalancersResult>(
                    new DescribeLoadBalancersResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Describes the available CloudWatch metrics for Auto Scaling.
     * 
     * 
     * Note that the GroupStandbyInstances metric is not returned
     * by default. You must explicitly request this metric when calling
     * EnableMetricsCollection.
     * 
     * 
     * @param describeMetricCollectionTypesRequest
     * @return Result of the DescribeMetricCollectionTypes operation returned by
     *         the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeMetricCollectionTypes
     */
    @Override
    public DescribeMetricCollectionTypesResult describeMetricCollectionTypes(
            DescribeMetricCollectionTypesRequest describeMetricCollectionTypesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeMetricCollectionTypesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeMetricCollectionTypesRequest> request = null;
        Response<DescribeMetricCollectionTypesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeMetricCollectionTypesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeMetricCollectionTypesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeMetricCollectionTypesResult> responseHandler = new StaxResponseHandler<DescribeMetricCollectionTypesResult>(
                    new DescribeMetricCollectionTypesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeMetricCollectionTypesResult describeMetricCollectionTypes() {
        return describeMetricCollectionTypes(new DescribeMetricCollectionTypesRequest());
    }
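    /*
     * Illustrative usage sketch (not part of the generated client): lists the available
     * metric collection types and, per the note above, explicitly requests the
     * GroupStandbyInstances metric via EnableMetricsCollection. The client variable
     * "autoScaling" and the group name "my-asg" are assumed, hypothetical values.
     *
     *   AmazonAutoScaling autoScaling = new AmazonAutoScalingClient();
     *   for (MetricCollectionType type : autoScaling.describeMetricCollectionTypes().getMetrics()) {
     *       System.out.println(type.getMetric());
     *   }
     *   autoScaling.enableMetricsCollection(new EnableMetricsCollectionRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withGranularity("1Minute")
     *           .withMetrics("GroupStandbyInstances"));
     */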
    /**
     * 
     * Describes the notification actions associated with the specified Auto
     * Scaling group.
     * 
     * 
     * @param describeNotificationConfigurationsRequest
     * @return Result of the DescribeNotificationConfigurations operation
     *         returned by the service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeNotificationConfigurations
     */
    @Override
    public DescribeNotificationConfigurationsResult describeNotificationConfigurations(
            DescribeNotificationConfigurationsRequest describeNotificationConfigurationsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeNotificationConfigurationsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeNotificationConfigurationsRequest> request = null;
        Response<DescribeNotificationConfigurationsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeNotificationConfigurationsRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeNotificationConfigurationsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeNotificationConfigurationsResult> responseHandler = new StaxResponseHandler<DescribeNotificationConfigurationsResult>(
                    new DescribeNotificationConfigurationsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeNotificationConfigurationsResult describeNotificationConfigurations() {
        return describeNotificationConfigurations(new DescribeNotificationConfigurationsRequest());
    }
    /**
     * 
     * Describes the policies for the specified Auto Scaling group.
     * 
     * 
     * @param describePoliciesRequest
     * @return Result of the DescribePolicies operation returned by the service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribePolicies
     */
    @Override
    public DescribePoliciesResult describePolicies(
            DescribePoliciesRequest describePoliciesRequest) {
        ExecutionContext executionContext = createExecutionContext(describePoliciesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribePoliciesRequest> request = null;
        Response<DescribePoliciesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribePoliciesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describePoliciesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribePoliciesResult> responseHandler = new StaxResponseHandler<DescribePoliciesResult>(
                    new DescribePoliciesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribePoliciesResult describePolicies() {
        return describePolicies(new DescribePoliciesRequest());
    }
    /**
     * 
     * Describes one or more scaling activities for the specified Auto Scaling
     * group. If you omit the ActivityIds, the call returns all
     * activities from the past six weeks. Activities are sorted by the start
     * time. Activities still in progress appear first on the list.
     * 
     * 
     * @param describeScalingActivitiesRequest
     * @return Result of the DescribeScalingActivities operation returned by the
     *         service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeScalingActivities
     */
    @Override
    public DescribeScalingActivitiesResult describeScalingActivities(
            DescribeScalingActivitiesRequest describeScalingActivitiesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeScalingActivitiesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeScalingActivitiesRequest> request = null;
        Response<DescribeScalingActivitiesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeScalingActivitiesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeScalingActivitiesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeScalingActivitiesResult> responseHandler = new StaxResponseHandler<DescribeScalingActivitiesResult>(
                    new DescribeScalingActivitiesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeScalingActivitiesResult describeScalingActivities() {
        return describeScalingActivities(new DescribeScalingActivitiesRequest());
    }
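    /*
     * Illustrative usage sketch (not part of the generated client): fetches recent
     * scaling activities for one group and follows NextToken to page through the
     * six-week history described above. "autoScaling" and "my-asg" are assumed values.
     *
     *   DescribeScalingActivitiesRequest activitiesRequest = new DescribeScalingActivitiesRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withMaxRecords(50);
     *   DescribeScalingActivitiesResult activities;
     *   do {
     *       activities = autoScaling.describeScalingActivities(activitiesRequest);
     *       for (Activity activity : activities.getActivities()) {
     *           System.out.println(activity.getStatusCode() + " " + activity.getDescription());
     *       }
     *       activitiesRequest.setNextToken(activities.getNextToken());
     *   } while (activities.getNextToken() != null);
     */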
    /**
     * 
     * Describes the scaling process types for use with ResumeProcesses
     * and SuspendProcesses.
     * 
     * 
     * @param describeScalingProcessTypesRequest
     * @return Result of the DescribeScalingProcessTypes operation returned by
     *         the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeScalingProcessTypes
     */
    @Override
    public DescribeScalingProcessTypesResult describeScalingProcessTypes(
            DescribeScalingProcessTypesRequest describeScalingProcessTypesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeScalingProcessTypesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeScalingProcessTypesRequest> request = null;
        Response<DescribeScalingProcessTypesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeScalingProcessTypesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeScalingProcessTypesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeScalingProcessTypesResult> responseHandler = new StaxResponseHandler<DescribeScalingProcessTypesResult>(
                    new DescribeScalingProcessTypesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeScalingProcessTypesResult describeScalingProcessTypes() {
        return describeScalingProcessTypes(new DescribeScalingProcessTypesRequest());
    }
    /**
     * 
     * Describes the actions scheduled for your Auto Scaling group that haven't
     * run. To describe the actions that have already run, use
     * DescribeScalingActivities.
     * 
     * 
     * @param describeScheduledActionsRequest
     * @return Result of the DescribeScheduledActions operation returned by the
     *         service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeScheduledActions
     */
    @Override
    public DescribeScheduledActionsResult describeScheduledActions(
            DescribeScheduledActionsRequest describeScheduledActionsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeScheduledActionsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeScheduledActionsRequest> request = null;
        Response<DescribeScheduledActionsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeScheduledActionsRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeScheduledActionsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeScheduledActionsResult> responseHandler = new StaxResponseHandler<DescribeScheduledActionsResult>(
                    new DescribeScheduledActionsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeScheduledActionsResult describeScheduledActions() {
        return describeScheduledActions(new DescribeScheduledActionsRequest());
    }
    /**
     * 
     * Describes the specified tags.
     * 
     * 
     * You can use filters to limit the results. For example, you can query for
     * the tags for a specific Auto Scaling group. You can specify multiple
     * values for a filter. A tag must match at least one of the specified
     * values for it to be included in the results.
     * 
     * 
     * You can also specify multiple filters. The result includes information
     * for a particular tag only if it matches all the filters. If there's no
     * match, no special message is returned.
     * 
     * 
     * @param describeTagsRequest
     * @return Result of the DescribeTags operation returned by the service.
     * @throws InvalidNextTokenException
     *         The NextToken value is not valid.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeTags
     */
    @Override
    public DescribeTagsResult describeTags(
            DescribeTagsRequest describeTagsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeTagsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeTagsRequest> request = null;
        Response<DescribeTagsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeTagsRequestMarshaller().marshall(super
                        .beforeMarshalling(describeTagsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeTagsResult> responseHandler = new StaxResponseHandler<DescribeTagsResult>(
                    new DescribeTagsResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeTagsResult describeTags() {
        return describeTags(new DescribeTagsRequest());
    }
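    /*
     * Illustrative usage sketch (not part of the generated client): uses a filter to
     * limit the results to the tags on a single group, as described above. "autoScaling"
     * and "my-asg" are assumed values.
     *
     *   DescribeTagsResult tags = autoScaling.describeTags(new DescribeTagsRequest()
     *           .withFilters(new Filter()
     *                   .withName("auto-scaling-group")
     *                   .withValues("my-asg")));
     *   for (TagDescription tag : tags.getTags()) {
     *       System.out.println(tag.getKey() + "=" + tag.getValue());
     *   }
     */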
    /**
     * 
     * Describes the termination policies supported by Auto Scaling.
     * 
     * 
     * @param describeTerminationPolicyTypesRequest
     * @return Result of the DescribeTerminationPolicyTypes operation returned
     *         by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DescribeTerminationPolicyTypes
     */
    @Override
    public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes(
            DescribeTerminationPolicyTypesRequest describeTerminationPolicyTypesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeTerminationPolicyTypesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeTerminationPolicyTypesRequest> request = null;
        Response<DescribeTerminationPolicyTypesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeTerminationPolicyTypesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(describeTerminationPolicyTypesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DescribeTerminationPolicyTypesResult> responseHandler = new StaxResponseHandler<DescribeTerminationPolicyTypesResult>(
                    new DescribeTerminationPolicyTypesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DescribeTerminationPolicyTypesResult describeTerminationPolicyTypes() {
        return describeTerminationPolicyTypes(new DescribeTerminationPolicyTypesRequest());
    }
    /**
     * 
     * Removes one or more instances from the specified Auto Scaling group.
     * 
     * 
     * After the instances are detached, you can manage them independently from
     * the rest of the Auto Scaling group.
     * 
     * 
     * If you do not specify the option to decrement the desired capacity, Auto
     * Scaling launches instances to replace the ones that are detached.
     * 
     * 
     * For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Auto
     * Scaling Developer Guide.
     * 
     * 
     * @param detachInstancesRequest
     * @return Result of the DetachInstances operation returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DetachInstances
     */
    @Override
    public DetachInstancesResult detachInstances(
            DetachInstancesRequest detachInstancesRequest) {
        ExecutionContext executionContext = createExecutionContext(detachInstancesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachInstancesRequest> request = null;
        Response<DetachInstancesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DetachInstancesRequestMarshaller().marshall(super
                        .beforeMarshalling(detachInstancesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DetachInstancesResult> responseHandler = new StaxResponseHandler<DetachInstancesResult>(
                    new DetachInstancesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
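    /*
     * Illustrative usage sketch (not part of the generated client): detaches one
     * instance and decrements the desired capacity so Auto Scaling does not launch a
     * replacement. "autoScaling", "my-asg" and the instance ID are assumed values.
     *
     *   DetachInstancesResult detachResult = autoScaling.detachInstances(new DetachInstancesRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withInstanceIds("i-0123456789abcdef0")
     *           .withShouldDecrementDesiredCapacity(true));
     *   for (Activity activity : detachResult.getActivities()) {
     *       System.out.println(activity.getDescription());
     *   }
     */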
    /**
     * 
     * Removes one or more load balancers from the specified Auto Scaling group.
     * 
     * 
     * When you detach a load balancer, it enters the Removing
     * state while deregistering the instances in the group. When all instances
     * are deregistered, then you can no longer describe the load balancer using
     * DescribeLoadBalancers. Note that the instances remain running.
     * 
     * 
     * @param detachLoadBalancersRequest
     * @return Result of the DetachLoadBalancers operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DetachLoadBalancers
     */
    @Override
    public DetachLoadBalancersResult detachLoadBalancers(
            DetachLoadBalancersRequest detachLoadBalancersRequest) {
        ExecutionContext executionContext = createExecutionContext(detachLoadBalancersRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DetachLoadBalancersRequest> request = null;
        Response<DetachLoadBalancersResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DetachLoadBalancersRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(detachLoadBalancersRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DetachLoadBalancersResult> responseHandler = new StaxResponseHandler<DetachLoadBalancersResult>(
                    new DetachLoadBalancersResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    @Override
    public DetachLoadBalancersResult detachLoadBalancers() {
        return detachLoadBalancers(new DetachLoadBalancersRequest());
    }
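    /*
     * Illustrative usage sketch (not part of the generated client): removes a load
     * balancer from a group; as noted above, the instances keep running but are
     * deregistered from the load balancer. "autoScaling", "my-asg" and "my-elb" are
     * assumed values.
     *
     *   autoScaling.detachLoadBalancers(new DetachLoadBalancersRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withLoadBalancerNames("my-elb"));
     */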
    /**
     * 
     * Disables monitoring of the specified metrics for the specified Auto
     * Scaling group.
     * 
     * 
     * @param disableMetricsCollectionRequest
     * @return Result of the DisableMetricsCollection operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.DisableMetricsCollection
     */
    @Override
    public DisableMetricsCollectionResult disableMetricsCollection(
            DisableMetricsCollectionRequest disableMetricsCollectionRequest) {
        ExecutionContext executionContext = createExecutionContext(disableMetricsCollectionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DisableMetricsCollectionRequest> request = null;
        Response<DisableMetricsCollectionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DisableMetricsCollectionRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(disableMetricsCollectionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<DisableMetricsCollectionResult> responseHandler = new StaxResponseHandler<DisableMetricsCollectionResult>(
                    new DisableMetricsCollectionResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Enables monitoring of the specified metrics for the specified Auto
     * Scaling group.
     * 
     * 
     * You can only enable metrics collection if InstanceMonitoring
     * in the launch configuration for the group is set to True.
     * 
     * 
     * @param enableMetricsCollectionRequest
     * @return Result of the EnableMetricsCollection operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.EnableMetricsCollection
     */
    @Override
    public EnableMetricsCollectionResult enableMetricsCollection(
            EnableMetricsCollectionRequest enableMetricsCollectionRequest) {
        ExecutionContext executionContext = createExecutionContext(enableMetricsCollectionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<EnableMetricsCollectionRequest> request = null;
        Response<EnableMetricsCollectionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new EnableMetricsCollectionRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(enableMetricsCollectionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<EnableMetricsCollectionResult> responseHandler = new StaxResponseHandler<EnableMetricsCollectionResult>(
                    new EnableMetricsCollectionResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
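    /*
     * Illustrative usage sketch (not part of the generated client): enables collection
     * of the group metrics at one-minute granularity; omitting the Metrics parameter
     * enables all of them. This assumes InstanceMonitoring is enabled in the group's
     * launch configuration, as noted above; "autoScaling" and "my-asg" are assumed
     * values.
     *
     *   autoScaling.enableMetricsCollection(new EnableMetricsCollectionRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withGranularity("1Minute"));
     */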
    /**
     * 
     * Moves the specified instances into Standby mode.
     * 
     * 
     * For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
     * 
     * 
     * @param enterStandbyRequest
     * @return Result of the EnterStandby operation returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.EnterStandby
     */
    @Override
    public EnterStandbyResult enterStandby(
            EnterStandbyRequest enterStandbyRequest) {
        ExecutionContext executionContext = createExecutionContext(enterStandbyRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<EnterStandbyRequest> request = null;
        Response<EnterStandbyResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new EnterStandbyRequestMarshaller().marshall(super
                        .beforeMarshalling(enterStandbyRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<EnterStandbyResult> responseHandler = new StaxResponseHandler<EnterStandbyResult>(
                    new EnterStandbyResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
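    /*
     * Illustrative usage sketch (not part of the generated client): moves one instance
     * into Standby, decrementing the desired capacity so the group does not launch a
     * replacement while the instance is being worked on, and later returns it to
     * service with ExitStandby. "autoScaling", "my-asg" and the instance ID are assumed
     * values.
     *
     *   autoScaling.enterStandby(new EnterStandbyRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withInstanceIds("i-0123456789abcdef0")
     *           .withShouldDecrementDesiredCapacity(true));
     *   // ... perform maintenance, then bring the instance back into service:
     *   autoScaling.exitStandby(new ExitStandbyRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withInstanceIds("i-0123456789abcdef0"));
     */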
    /**
     * 
     * Executes the specified policy.
     * 
     * 
     * @param executePolicyRequest
     * @return Result of the ExecutePolicy operation returned by the service.
     * @throws ScalingActivityInProgressException
     *         The Auto Scaling group can't be deleted because there are scaling
     *         activities in progress.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.ExecutePolicy
     */
    @Override
    public ExecutePolicyResult executePolicy(
            ExecutePolicyRequest executePolicyRequest) {
        ExecutionContext executionContext = createExecutionContext(executePolicyRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ExecutePolicyRequest> request = null;
        Response<ExecutePolicyResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new ExecutePolicyRequestMarshaller().marshall(super
                        .beforeMarshalling(executePolicyRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<ExecutePolicyResult> responseHandler = new StaxResponseHandler<ExecutePolicyResult>(
                    new ExecutePolicyResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Moves the specified instances out of Standby mode.
     * 
     * 
     * For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
     * 
     * 
     * @param exitStandbyRequest
     * @return Result of the ExitStandby operation returned by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.ExitStandby
     */
    @Override
    public ExitStandbyResult exitStandby(ExitStandbyRequest exitStandbyRequest) {
        ExecutionContext executionContext = createExecutionContext(exitStandbyRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ExitStandbyRequest> request = null;
        Response<ExitStandbyResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new ExitStandbyRequestMarshaller().marshall(super
                        .beforeMarshalling(exitStandbyRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<ExitStandbyResult> responseHandler = new StaxResponseHandler<ExitStandbyResult>(
                    new ExitStandbyResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Creates or updates a lifecycle hook for the specified Auto Scaling group.
     * 
     * 
     * A lifecycle hook tells Auto Scaling that you want to perform an action on
     * an instance that is not actively in service; for example, either when the
     * instance launches or before the instance terminates.
     * 
     * 
     * This step is a part of the procedure for adding a lifecycle hook to an
     * Auto Scaling group:
     * 
     * 
     * - (Optional) Create a Lambda function and a rule that allows CloudWatch
     * Events to invoke your Lambda function when Auto Scaling launches or
     * terminates instances.
     *
     * - (Optional) Create a notification target and an IAM role. The target
     * can be either an Amazon SQS queue or an Amazon SNS topic. The role allows
     * Auto Scaling to publish lifecycle notifications to the target.
     *
     * - Create the lifecycle hook. Specify whether the hook is used when
     * the instances launch or terminate.
     *
     * - If you need more time, record the lifecycle action heartbeat to keep
     * the instance in a pending state.
     *
     * - If you finish before the timeout period ends, complete the lifecycle
     * action.
     *
     * 
     * 
     * For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
     * 
     * 
     * If you exceed your maximum limit of lifecycle hooks, which by default is
     * 50 per region, the call fails. For information about updating this limit,
     * see AWS Service Limits in the Amazon Web Services General
     * Reference.
     * 
     * 
     * @param putLifecycleHookRequest
     * @return Result of the PutLifecycleHook operation returned by the service.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.PutLifecycleHook
     */
    @Override
    public PutLifecycleHookResult putLifecycleHook(
            PutLifecycleHookRequest putLifecycleHookRequest) {
        ExecutionContext executionContext = createExecutionContext(putLifecycleHookRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutLifecycleHookRequest> request = null;
        Response<PutLifecycleHookResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PutLifecycleHookRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(putLifecycleHookRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<PutLifecycleHookResult> responseHandler = new StaxResponseHandler<PutLifecycleHookResult>(
                    new PutLifecycleHookResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
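    /*
     * Illustrative usage sketch (not part of the generated client): creates a launch
     * lifecycle hook that pauses new instances for up to five minutes so they can be
     * bootstrapped before entering service. "autoScaling", "my-asg", the hook name, the
     * SNS topic ARN and the IAM role ARN are assumed, hypothetical values.
     *
     *   autoScaling.putLifecycleHook(new PutLifecycleHookRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withLifecycleHookName("my-launch-hook")
     *           .withLifecycleTransition("autoscaling:EC2_INSTANCE_LAUNCHING")
     *           .withNotificationTargetARN("arn:aws:sns:us-east-1:123456789012:my-topic")
     *           .withRoleARN("arn:aws:iam::123456789012:role/my-notification-role")
     *           .withHeartbeatTimeout(300)
     *           .withDefaultResult("CONTINUE"));
     */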
    /**
     * 
     * Configures an Auto Scaling group to send notifications when specified
     * events take place. Subscribers to this topic can have messages for events
     * delivered to an endpoint such as a web server or email address.
     * 
     * 
     * For more information, see Getting Notifications When Your Auto Scaling Group Changes in the
     * Auto Scaling Developer Guide.
     * 
     * 
     * This configuration overwrites an existing configuration.
     * 
     * 
     * @param putNotificationConfigurationRequest
     * @return Result of the PutNotificationConfiguration operation returned by
     *         the service.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.PutNotificationConfiguration
     */
    @Override
    public PutNotificationConfigurationResult putNotificationConfiguration(
            PutNotificationConfigurationRequest putNotificationConfigurationRequest) {
        ExecutionContext executionContext = createExecutionContext(putNotificationConfigurationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutNotificationConfigurationRequest> request = null;
        Response<PutNotificationConfigurationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PutNotificationConfigurationRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(putNotificationConfigurationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<PutNotificationConfigurationResult> responseHandler = new StaxResponseHandler<PutNotificationConfigurationResult>(
                    new PutNotificationConfigurationResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
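    /*
     * Illustrative usage sketch (not part of the generated client): subscribes an SNS
     * topic to launch and terminate events for one group; as noted above, this replaces
     * any existing notification configuration. The topic ARN, "autoScaling" and
     * "my-asg" are assumed values.
     *
     *   autoScaling.putNotificationConfiguration(new PutNotificationConfigurationRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withTopicARN("arn:aws:sns:us-east-1:123456789012:my-topic")
     *           .withNotificationTypes("autoscaling:EC2_INSTANCE_LAUNCH",
     *                   "autoscaling:EC2_INSTANCE_TERMINATE"));
     */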
    /**
     * 
     * Creates or updates a policy for an Auto Scaling group. To update an
     * existing policy, use the existing policy name and set the parameters you
     * want to change. Any existing parameter not changed in an update to an
     * existing policy is not changed in this update request.
     * 
     * 
     * If you exceed your maximum limit of step adjustments, which by default is
     * 20 per region, the call fails. For information about updating this limit,
     * see AWS Service Limits in the Amazon Web Services General
     * Reference.
     * 
     * 
     * @param putScalingPolicyRequest
     * @return Result of the PutScalingPolicy operation returned by the service.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.PutScalingPolicy
     */
    @Override
    public PutScalingPolicyResult putScalingPolicy(
            PutScalingPolicyRequest putScalingPolicyRequest) {
        ExecutionContext executionContext = createExecutionContext(putScalingPolicyRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutScalingPolicyRequest> request = null;
        Response<PutScalingPolicyResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PutScalingPolicyRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(putScalingPolicyRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<PutScalingPolicyResult> responseHandler = new StaxResponseHandler<PutScalingPolicyResult>(
                    new PutScalingPolicyResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
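    /*
     * Illustrative usage sketch (not part of the generated client): creates a simple
     * scaling policy that adds one instance, with a five-minute cooldown. The returned
     * policy ARN can then be attached to a CloudWatch alarm. "autoScaling", "my-asg" and
     * the policy name are assumed values.
     *
     *   PutScalingPolicyResult policyResult = autoScaling.putScalingPolicy(new PutScalingPolicyRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withPolicyName("scale-out-by-one")
     *           .withPolicyType("SimpleScaling")
     *           .withAdjustmentType("ChangeInCapacity")
     *           .withScalingAdjustment(1)
     *           .withCooldown(300));
     *   System.out.println(policyResult.getPolicyARN());
     */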
    /**
     * 
     * Creates or updates a scheduled scaling action for an Auto Scaling group.
     * When updating a scheduled scaling action, if you leave a parameter
     * unspecified, the corresponding value remains unchanged in the affected
     * Auto Scaling group.
     * 
     * 
     * For more information, see Scheduled Scaling in the Auto Scaling Developer Guide.
     * 
     * 
     * @param putScheduledUpdateGroupActionRequest
     * @return Result of the PutScheduledUpdateGroupAction operation returned by
     *         the service.
     * @throws AlreadyExistsException
     *         You already have an Auto Scaling group or launch configuration
     *         with this name.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.PutScheduledUpdateGroupAction
     */
    @Override
    public PutScheduledUpdateGroupActionResult putScheduledUpdateGroupAction(
            PutScheduledUpdateGroupActionRequest putScheduledUpdateGroupActionRequest) {
        ExecutionContext executionContext = createExecutionContext(putScheduledUpdateGroupActionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutScheduledUpdateGroupActionRequest> request = null;
        Response<PutScheduledUpdateGroupActionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PutScheduledUpdateGroupActionRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(putScheduledUpdateGroupActionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<PutScheduledUpdateGroupActionResult> responseHandler = new StaxResponseHandler<PutScheduledUpdateGroupActionResult>(
                    new PutScheduledUpdateGroupActionResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
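    /*
     * Illustrative usage sketch (not part of the generated client): schedules a
     * recurring weekday-morning scale-out using a cron expression (evaluated in UTC).
     * "autoScaling", "my-asg" and the action name are assumed values.
     *
     *   autoScaling.putScheduledUpdateGroupAction(new PutScheduledUpdateGroupActionRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withScheduledActionName("weekday-morning-scale-out")
     *           .withRecurrence("0 8 * * 1-5")
     *           .withMinSize(2)
     *           .withMaxSize(10)
     *           .withDesiredCapacity(4));
     */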
    /**
     * 
     * Records a heartbeat for the lifecycle action associated with the
     * specified token or instance. This extends the timeout by the length of
     * time defined using PutLifecycleHook.
     * 
     * 
     * This step is a part of the procedure for adding a lifecycle hook to an
     * Auto Scaling group:
     * 
     * 
     * - (Optional) Create a Lambda function and a rule that allows CloudWatch
     * Events to invoke your Lambda function when Auto Scaling launches or
     * terminates instances.
     *
     * - (Optional) Create a notification target and an IAM role. The target
     * can be either an Amazon SQS queue or an Amazon SNS topic. The role allows
     * Auto Scaling to publish lifecycle notifications to the target.
     *
     * - Create the lifecycle hook. Specify whether the hook is used when the
     * instances launch or terminate.
     *
     * - If you need more time, record the lifecycle action heartbeat to
     * keep the instance in a pending state.
     *
     * - If you finish before the timeout period ends, complete the lifecycle
     * action.
     *
     * 
     * 
     * For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
     * 
     * 
     * @param recordLifecycleActionHeartbeatRequest
     * @return Result of the RecordLifecycleActionHeartbeat operation returned
     *         by the service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.RecordLifecycleActionHeartbeat
     */
    @Override
    public RecordLifecycleActionHeartbeatResult recordLifecycleActionHeartbeat(
            RecordLifecycleActionHeartbeatRequest recordLifecycleActionHeartbeatRequest) {
        ExecutionContext executionContext = createExecutionContext(recordLifecycleActionHeartbeatRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<RecordLifecycleActionHeartbeatRequest> request = null;
        Response<RecordLifecycleActionHeartbeatResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new RecordLifecycleActionHeartbeatRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(recordLifecycleActionHeartbeatRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<RecordLifecycleActionHeartbeatResult> responseHandler = new StaxResponseHandler<RecordLifecycleActionHeartbeatResult>(
                    new RecordLifecycleActionHeartbeatResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
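    /*
     * Illustrative usage sketch (not part of the generated client): extends the hook
     * timeout for an instance that is still being prepared, identified here by the
     * lifecycle action token delivered with the notification. "autoScaling", "my-asg",
     * the hook name and the token are assumed, hypothetical values.
     *
     *   autoScaling.recordLifecycleActionHeartbeat(new RecordLifecycleActionHeartbeatRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withLifecycleHookName("my-launch-hook")
     *           .withLifecycleActionToken("bcd2f1b8-9a78-44d3-8a7a-4dd07d7cf635"));
     */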
    /**
     * 
     * Resumes the specified suspended Auto Scaling processes, or all suspended
     * processes, for the specified Auto Scaling group.
     * 
     * 
     * For more information, see Suspending and Resuming Auto Scaling Processes in the Auto
     * Scaling Developer Guide.
     * 
     * 
     * @param resumeProcessesRequest
     * @return Result of the ResumeProcesses operation returned by the service.
     * @throws ResourceInUseException
     *         The Auto Scaling group or launch configuration can't be deleted
     *         because it is in use.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.ResumeProcesses
     */
    @Override
    public ResumeProcessesResult resumeProcesses(
            ResumeProcessesRequest resumeProcessesRequest) {
        ExecutionContext executionContext = createExecutionContext(resumeProcessesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ResumeProcessesRequest> request = null;
        Response<ResumeProcessesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new ResumeProcessesRequestMarshaller().marshall(super
                        .beforeMarshalling(resumeProcessesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<ResumeProcessesResult> responseHandler = new StaxResponseHandler<ResumeProcessesResult>(
                    new ResumeProcessesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
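    /*
     * Illustrative usage sketch (not part of the generated client): resumes two
     * previously suspended process types; omitting the process names resumes every
     * suspended process for the group. "autoScaling" and "my-asg" are assumed values.
     *
     *   autoScaling.resumeProcesses(new ResumeProcessesRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withScalingProcesses("AlarmNotification", "ScheduledActions"));
     */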
    /**
     * 
     * Sets the size of the specified Auto Scaling group.
     * 
     * 
     * For more information about desired capacity, see What Is Auto Scaling? in the Auto Scaling Developer Guide.
     * 
     * 
     * @param setDesiredCapacityRequest
     * @return Result of the SetDesiredCapacity operation returned by the
     *         service.
     * @throws ScalingActivityInProgressException
     *         The Auto Scaling group can't be deleted because there are scaling
     *         activities in progress.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.SetDesiredCapacity
     */
    @Override
    public SetDesiredCapacityResult setDesiredCapacity(
            SetDesiredCapacityRequest setDesiredCapacityRequest) {
        ExecutionContext executionContext = createExecutionContext(setDesiredCapacityRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SetDesiredCapacityRequest> request = null;
        Response<SetDesiredCapacityResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new SetDesiredCapacityRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(setDesiredCapacityRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<SetDesiredCapacityResult> responseHandler = new StaxResponseHandler<SetDesiredCapacityResult>(
                    new SetDesiredCapacityResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
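    /*
     * Illustrative usage sketch (not part of the generated client): manually sets the
     * desired capacity of a group, honoring any cooldown from a previous scaling
     * activity. "autoScaling" and "my-asg" are assumed values.
     *
     *   autoScaling.setDesiredCapacity(new SetDesiredCapacityRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withDesiredCapacity(3)
     *           .withHonorCooldown(true));
     */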
    /**
     * 
     * Sets the health status of the specified instance.
     * 
     * 
     * For more information, see Health Checks in the Auto Scaling Developer Guide.
     * 
     * 
     * @param setInstanceHealthRequest
     * @return Result of the SetInstanceHealth operation returned by the
     *         service.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.SetInstanceHealth
     */
    @Override
    public SetInstanceHealthResult setInstanceHealth(
            SetInstanceHealthRequest setInstanceHealthRequest) {
        ExecutionContext executionContext = createExecutionContext(setInstanceHealthRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SetInstanceHealthRequest> request = null;
        Response<SetInstanceHealthResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new SetInstanceHealthRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(setInstanceHealthRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<SetInstanceHealthResult> responseHandler = new StaxResponseHandler<SetInstanceHealthResult>(
                    new SetInstanceHealthResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
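    /*
     * Illustrative usage sketch (not part of the generated client): reports an instance
     * as Unhealthy based on an external check so Auto Scaling can replace it;
     * ShouldRespectGracePeriod keeps the group's health check grace period in effect.
     * "autoScaling" and the instance ID are assumed values.
     *
     *   autoScaling.setInstanceHealth(new SetInstanceHealthRequest()
     *           .withInstanceId("i-0123456789abcdef0")
     *           .withHealthStatus("Unhealthy")
     *           .withShouldRespectGracePeriod(true));
     */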
    /**
     * 
     * Updates the instance protection settings of the specified instances.
     * 
     * 
     * For more information, see Instance Protection in the Auto Scaling Developer Guide.
     * 
     * 
     * @param setInstanceProtectionRequest
     * @return Result of the SetInstanceProtection operation returned by the
     *         service.
     * @throws LimitExceededException
     *         You have already reached a limit for your Auto Scaling resources
     *         (for example, groups, launch configurations, or lifecycle hooks).
     *         For more information, see DescribeAccountLimits.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.SetInstanceProtection
     */
    @Override
    public SetInstanceProtectionResult setInstanceProtection(
            SetInstanceProtectionRequest setInstanceProtectionRequest) {
        ExecutionContext executionContext = createExecutionContext(setInstanceProtectionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SetInstanceProtectionRequest> request = null;
        Response<SetInstanceProtectionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new SetInstanceProtectionRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(setInstanceProtectionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<SetInstanceProtectionResult> responseHandler = new StaxResponseHandler<SetInstanceProtectionResult>(
                    new SetInstanceProtectionResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Suspends the specified Auto Scaling processes, or all processes, for the
     * specified Auto Scaling group.
     * 
     * 
     * Note that if you suspend either the Launch or
     * Terminate process types, it can prevent other process types
     * from functioning properly.
     * 
     * 
     * To resume processes that have been suspended, use ResumeProcesses.
     * 
     * 
     * For more information, see Suspending and Resuming Auto Scaling Processes in the Auto
     * Scaling Developer Guide.
     * 
     * 
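     * A minimal usage sketch (the group name is illustrative; omitting
     * ScalingProcesses would suspend all process types):
     * 
     *   AmazonAutoScaling client = new AmazonAutoScalingClient();
     *   client.suspendProcesses(new SuspendProcessesRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withScalingProcesses("AlarmNotification"));
     * 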
     * @param suspendProcessesRequest
     * @return Result of the SuspendProcesses operation returned by the service.
     * @throws ResourceInUseException
     *         The Auto Scaling group or launch configuration can't be deleted
     *         because it is in use.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.SuspendProcesses
     */
    @Override
    public SuspendProcessesResult suspendProcesses(
            SuspendProcessesRequest suspendProcessesRequest) {
        ExecutionContext executionContext = createExecutionContext(suspendProcessesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<SuspendProcessesRequest> request = null;
        Response<SuspendProcessesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new SuspendProcessesRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(suspendProcessesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<SuspendProcessesResult> responseHandler = new StaxResponseHandler<SuspendProcessesResult>(
                    new SuspendProcessesResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Terminates the specified instance and optionally adjusts the desired
     * group size.
     * 
     * 
     * This call simply makes a termination request. The instance is not
     * terminated immediately.
     * 
     * 
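     * A minimal usage sketch (the instance ID is illustrative):
     * 
     *   AmazonAutoScaling client = new AmazonAutoScalingClient();
     *   client.terminateInstanceInAutoScalingGroup(
     *           new TerminateInstanceInAutoScalingGroupRequest()
     *                   .withInstanceId("i-1234567890abcdef0")
     *                   .withShouldDecrementDesiredCapacity(true));
     * 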
     * @param terminateInstanceInAutoScalingGroupRequest
     * @return Result of the TerminateInstanceInAutoScalingGroup operation
     *         returned by the service.
     * @throws ScalingActivityInProgressException
     *         The Auto Scaling group can't be deleted because there are scaling
     *         activities in progress.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.TerminateInstanceInAutoScalingGroup
     */
    @Override
    public TerminateInstanceInAutoScalingGroupResult terminateInstanceInAutoScalingGroup(
            TerminateInstanceInAutoScalingGroupRequest terminateInstanceInAutoScalingGroupRequest) {
        ExecutionContext executionContext = createExecutionContext(terminateInstanceInAutoScalingGroupRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<TerminateInstanceInAutoScalingGroupRequest> request = null;
        Response<TerminateInstanceInAutoScalingGroupResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new TerminateInstanceInAutoScalingGroupRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(terminateInstanceInAutoScalingGroupRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<TerminateInstanceInAutoScalingGroupResult> responseHandler = new StaxResponseHandler<TerminateInstanceInAutoScalingGroupResult>(
                    new TerminateInstanceInAutoScalingGroupResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * 
     * Updates the configuration for the specified Auto Scaling group.
     * 
     * 
     * To update an Auto Scaling group with a launch configuration with
     * InstanceMonitoring set to False, you must first
     * disable the collection of group metrics. Otherwise, you will get an
     * error. If you have previously enabled the collection of group metrics,
     * you can disable it using DisableMetricsCollection.
     * 
     * 
     * The new settings are registered upon the completion of this call. Any
     * launch configuration settings take effect on any triggers after this call
     * returns. Scaling activities that are currently in progress aren't
     * affected.
     * 
     * 
     * Note the following:
     * 
     * - If you specify a new value for MinSize without specifying a value for
     *   DesiredCapacity, and the new MinSize is larger than the current size of
     *   the group, we implicitly call SetDesiredCapacity to set the size of the
     *   group to the new value of MinSize.
     * 
     * - If you specify a new value for MaxSize without specifying a value for
     *   DesiredCapacity, and the new MaxSize is smaller than the current size
     *   of the group, we implicitly call SetDesiredCapacity to set the size of
     *   the group to the new value of MaxSize.
     * 
     * - All other optional parameters are left unchanged if not specified.
     * 
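     * A minimal usage sketch (the group name and sizes are illustrative; per the
     * notes above, omitting DesiredCapacity lets the service adjust it implicitly
     * when the new MinSize or MaxSize conflicts with the current group size):
     * 
     *   AmazonAutoScaling client = new AmazonAutoScalingClient();
     *   client.updateAutoScalingGroup(new UpdateAutoScalingGroupRequest()
     *           .withAutoScalingGroupName("my-asg")
     *           .withMinSize(2)
     *           .withMaxSize(10));
     * 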
     * @param updateAutoScalingGroupRequest
     * @return Result of the UpdateAutoScalingGroup operation returned by the
     *         service.
     * @throws ScalingActivityInProgressException
     *         The Auto Scaling group can't be deleted because there are scaling
     *         activities in progress.
     * @throws ResourceContentionException
     *         You already have a pending update to an Auto Scaling resource
     *         (for example, a group, instance, or load balancer).
     * @sample AmazonAutoScaling.UpdateAutoScalingGroup
     */
    @Override
    public UpdateAutoScalingGroupResult updateAutoScalingGroup(
            UpdateAutoScalingGroupRequest updateAutoScalingGroupRequest) {
        ExecutionContext executionContext = createExecutionContext(updateAutoScalingGroupRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<UpdateAutoScalingGroupRequest> request = null;
        Response<UpdateAutoScalingGroupResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new UpdateAutoScalingGroupRequestMarshaller()
                        .marshall(super
                                .beforeMarshalling(updateAutoScalingGroupRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }
            StaxResponseHandler<UpdateAutoScalingGroupResult> responseHandler = new StaxResponseHandler<UpdateAutoScalingGroupResult>(
                    new UpdateAutoScalingGroupResultStaxUnmarshaller());
            response = invoke(request, responseHandler, executionContext);
            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
    /**
     * Returns additional metadata for a previously executed successful request,
     * typically used for debugging issues where a service isn't acting as
     * expected. This data isn't considered part of the result data returned by
     * an operation, so it's available through this separate, diagnostic
     * interface.
     * 
     * Response metadata is only cached for a limited period of time, so if you
     * need to access this extra diagnostic information for an executed request,
     * you should use this method to retrieve it as soon as possible after
     * executing the request.
     *
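     * A minimal usage sketch (the request object is illustrative; retrieve the
     * metadata promptly after the operation completes, since the cache is
     * time-limited):
     * 
     *   DescribeAutoScalingGroupsRequest req = new DescribeAutoScalingGroupsRequest();
     *   client.describeAutoScalingGroups(req);
     *   ResponseMetadata metadata = client.getCachedResponseMetadata(req);
     *   String requestId = (metadata != null) ? metadata.getRequestId() : null;
     *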
     * @param request
     *        The originally executed request
     *
     * @return The response metadata for the specified request, or null if none
     *         is available.
     */
    public ResponseMetadata getCachedResponseMetadata(
            AmazonWebServiceRequest request) {
        return client.getResponseMetadataForRequest(request);
    }
    /**
     * Normal invoke with authentication. Credentials are required and may be
     * overridden at the request level.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(
            Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            ExecutionContext executionContext) {
        executionContext.setCredentialsProvider(CredentialUtils
                .getCredentialsProvider(request.getOriginalRequest(),
                        awsCredentialsProvider));
        return doInvoke(request, responseHandler, executionContext);
    }
    /**
     * Invoke with no authentication. Credentials are not required and any
     * credentials set on the client or request will be ignored for this
     * operation.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> anonymousInvoke(
            Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            ExecutionContext executionContext) {
        return doInvoke(request, responseHandler, executionContext);
    }
    /**
     * Invoke the request using the http client. Assumes credentials (or lack
     * thereof) have been configured in the ExecutionContext beforehand.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> doInvoke(
            Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            ExecutionContext executionContext) {
        request.setEndpoint(endpoint);
        request.setTimeOffset(timeOffset);
        DefaultErrorResponseHandler errorResponseHandler = new DefaultErrorResponseHandler(
                exceptionUnmarshallers);
        return client.execute(request, responseHandler, errorResponseHandler,
                executionContext);
    }
}