// Source: com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClient (AWS SDK for Java 1.x, generated client)
/*
* Copyright 2011-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not
* use this file except in compliance with the License. A copy of the License is
* located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.kinesisfirehose;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import java.util.Map.Entry;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.kinesisfirehose.model.*;
import com.amazonaws.services.kinesisfirehose.model.transform.*;
/**
* Client for accessing Firehose. All service calls made using this client are
* blocking, and will not return until the service call completes.
*
* Amazon Kinesis Firehose API Reference
*
* Amazon Kinesis Firehose is a fully-managed service that delivers real-time
* streaming data to destinations such as Amazon Simple Storage Service (Amazon
* S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.
*
*/
@ThreadSafe
public class AmazonKinesisFirehoseClient extends AmazonWebServiceClient
implements AmazonKinesisFirehose {
/** Provider for AWS credentials. */
private final AWSCredentialsProvider awsCredentialsProvider;
// Logger keyed to the service interface so every client implementation
// logs under the same category.
private static final Log log = LogFactory
.getLog(AmazonKinesisFirehose.class);
/** Default signing name for the service. */
private static final String DEFAULT_SIGNING_NAME = "firehose";
/**
 * Client configuration factory providing ClientConfigurations tailored to
 * this client
 */
protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();
// JSON protocol factory pre-registered with every modeled service error
// code, so error responses can be unmarshalled into their typed exception
// classes; unrecognized errors fall back to the base service exception.
private final SdkJsonProtocolFactory protocolFactory = new SdkJsonProtocolFactory(
new JsonClientMetadata()
.withProtocolVersion("1.1")
.withSupportsCbor(false)
.addErrorMetadata(
new JsonErrorShapeMetadata()
.withErrorCode(
"ConcurrentModificationException")
.withModeledClass(
com.amazonaws.services.kinesisfirehose.model.ConcurrentModificationException.class))
.addErrorMetadata(
new JsonErrorShapeMetadata()
.withErrorCode(
"ServiceUnavailableException")
.withModeledClass(
com.amazonaws.services.kinesisfirehose.model.ServiceUnavailableException.class))
.addErrorMetadata(
new JsonErrorShapeMetadata()
.withErrorCode("InvalidArgumentException")
.withModeledClass(
com.amazonaws.services.kinesisfirehose.model.InvalidArgumentException.class))
.addErrorMetadata(
new JsonErrorShapeMetadata()
.withErrorCode("ResourceInUseException")
.withModeledClass(
com.amazonaws.services.kinesisfirehose.model.ResourceInUseException.class))
.addErrorMetadata(
new JsonErrorShapeMetadata()
.withErrorCode("ResourceNotFoundException")
.withModeledClass(
com.amazonaws.services.kinesisfirehose.model.ResourceNotFoundException.class))
.addErrorMetadata(
new JsonErrorShapeMetadata()
.withErrorCode("LimitExceededException")
.withModeledClass(
com.amazonaws.services.kinesisfirehose.model.LimitExceededException.class))
.withBaseServiceExceptionClass(
com.amazonaws.services.kinesisfirehose.model.AmazonKinesisFirehoseException.class));
/**
 * Constructs a new client to invoke service methods on Firehose. A
 * credentials provider chain will be used that searches for credentials in
 * this order:
 * <ul>
 * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li>
 * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li>
 * <li>Instance profile credentials delivered through the Amazon EC2
 * metadata service</li>
 * </ul>
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @see DefaultAWSCredentialsProviderChain
 */
public AmazonKinesisFirehoseClient() {
this(new DefaultAWSCredentialsProviderChain(), configFactory
.getConfig());
}
/**
 * Constructs a new client to invoke service methods on Firehose. A
 * credentials provider chain will be used that searches for credentials in
 * this order:
 * <ul>
 * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li>
 * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li>
 * <li>Instance profile credentials delivered through the Amazon EC2
 * metadata service</li>
 * </ul>
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param clientConfiguration
 *        The client configuration options controlling how this client
 *        connects to Firehose (ex: proxy settings, retry counts, etc.).
 *
 * @see DefaultAWSCredentialsProviderChain
 */
public AmazonKinesisFirehoseClient(ClientConfiguration clientConfiguration) {
this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
}
/**
 * Constructs a new client to invoke service methods on Firehose using the
 * specified AWS account credentials.
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param awsCredentials
 *        The AWS credentials (access key ID and secret key) to use when
 *        authenticating with AWS services.
 */
public AmazonKinesisFirehoseClient(AWSCredentials awsCredentials) {
this(awsCredentials, configFactory.getConfig());
}
/**
 * Constructs a new client to invoke service methods on Firehose using the
 * specified AWS account credentials and client configuration options.
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param awsCredentials
 *        The AWS credentials (access key ID and secret key) to use when
 *        authenticating with AWS services.
 * @param clientConfiguration
 *        The client configuration options controlling how this client
 *        connects to Firehose (ex: proxy settings, retry counts, etc.).
 */
public AmazonKinesisFirehoseClient(AWSCredentials awsCredentials,
ClientConfiguration clientConfiguration) {
super(clientConfiguration);
// Wrap the fixed credentials in a provider so the rest of the client
// only ever deals with the provider interface.
this.awsCredentialsProvider = new StaticCredentialsProvider(
awsCredentials);
init();
}
/**
 * Constructs a new client to invoke service methods on Firehose using the
 * specified AWS account credentials provider.
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param awsCredentialsProvider
 *        The AWS credentials provider which will provide credentials to
 *        authenticate requests with AWS services.
 */
public AmazonKinesisFirehoseClient(
AWSCredentialsProvider awsCredentialsProvider) {
this(awsCredentialsProvider, configFactory.getConfig());
}
/**
 * Constructs a new client to invoke service methods on Firehose using the
 * specified AWS account credentials provider and client configuration
 * options.
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param awsCredentialsProvider
 *        The AWS credentials provider which will provide credentials to
 *        authenticate requests with AWS services.
 * @param clientConfiguration
 *        The client configuration options controlling how this client
 *        connects to Firehose (ex: proxy settings, retry counts, etc.).
 */
public AmazonKinesisFirehoseClient(
AWSCredentialsProvider awsCredentialsProvider,
ClientConfiguration clientConfiguration) {
this(awsCredentialsProvider, clientConfiguration, null);
}
/**
 * Constructs a new client to invoke service methods on Firehose using the
 * specified AWS account credentials provider, client configuration options,
 * and request metric collector.
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param awsCredentialsProvider
 *        The AWS credentials provider which will provide credentials to
 *        authenticate requests with AWS services.
 * @param clientConfiguration
 *        The client configuration options controlling how this client
 *        connects to Firehose (ex: proxy settings, retry counts, etc.).
 * @param requestMetricCollector
 *        optional request metric collector
 */
public AmazonKinesisFirehoseClient(
AWSCredentialsProvider awsCredentialsProvider,
ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
super(clientConfiguration, requestMetricCollector);
this.awsCredentialsProvider = awsCredentialsProvider;
init();
}
/**
 * Constructs a new client to invoke service methods on Firehose using the
 * specified parameters.
 * <p>
 * All service calls made using this new client object are blocking, and
 * will not return until the service call completes.
 *
 * @param clientParams
 *        Object providing client parameters.
 */
AmazonKinesisFirehoseClient(AwsSyncClientParams clientParams) {
super(clientParams);
this.awsCredentialsProvider = clientParams.getCredentialsProvider();
init();
}
/**
 * One-time client setup shared by all constructors: signing name, default
 * endpoint, and the classpath-configured request handler chains.
 */
private void init() {
setServiceNameIntern(DEFAULT_SIGNING_NAME);
setEndpointPrefix(ENDPOINT_PREFIX);
// calling this.setEndPoint(...) will also modify the signer accordingly
setEndpoint("https://firehose.us-east-1.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
// Load any legacy (v1) and current (v2) request handlers declared on the
// classpath for this service.
requestHandler2s
.addAll(chainFactory
.newRequestHandlerChain("/com/amazonaws/services/kinesisfirehose/request.handlers"));
requestHandler2s
.addAll(chainFactory
.newRequestHandler2Chain("/com/amazonaws/services/kinesisfirehose/request.handler2s"));
}
/**
 * <p>
 * Creates a delivery stream.
 * </p>
 * <p>
 * <b>CreateDeliveryStream</b> is an asynchronous operation that immediately
 * returns. The initial status of the delivery stream is {@code CREATING}.
 * After the delivery stream is created, its status is {@code ACTIVE} and it
 * now accepts data. Attempts to send data to a delivery stream that is not
 * in the {@code ACTIVE} state cause an exception. To check the state of a
 * delivery stream, use <b>DescribeDeliveryStream</b>.
 * </p>
 * <p>
 * The name of a delivery stream identifies it. You can't have two delivery
 * streams with the same name in the same region. Two delivery streams in
 * different AWS accounts or different regions in the same AWS account can
 * have the same name.
 * </p>
 * <p>
 * By default, you can create up to 20 delivery streams per region.
 * </p>
 * <p>
 * A delivery stream can only be configured with a single destination:
 * Amazon S3, Amazon Elasticsearch Service, or Amazon Redshift. Specify only
 * one destination configuration parameter — <b>S3DestinationConfiguration</b>,
 * <b>ElasticsearchDestinationConfiguration</b>, or
 * <b>RedshiftDestinationConfiguration</b>.
 * </p>
 * <p>
 * As part of <b>S3DestinationConfiguration</b>, optional values
 * <b>BufferingHints</b>, <b>EncryptionConfiguration</b>, and
 * <b>CompressionFormat</b> can be provided. By default, if no
 * <b>BufferingHints</b> value is provided, Firehose buffers data up to 5 MB
 * or for 5 minutes, whichever condition is satisfied first. Note that
 * <b>BufferingHints</b> is a hint, so there are some cases where the
 * service cannot adhere to these conditions strictly. By default, no
 * encryption is performed; we strongly recommend enabling encryption for
 * secure data storage in Amazon S3.
 * </p>
 * <p>
 * A few notes about <b>RedshiftDestinationConfiguration</b>:
 * </p>
 * <ul>
 * <li>An Amazon Redshift destination requires an S3 bucket as intermediate
 * location, as Firehose first delivers data to S3 and then uses
 * {@code COPY} syntax to load data into an Amazon Redshift table. This is
 * specified in the
 * <b>RedshiftDestinationConfiguration.S3Configuration</b> parameter
 * element.</li>
 * <li>The compression formats {@code SNAPPY} or {@code ZIP} cannot be
 * specified in <b>RedshiftDestinationConfiguration.S3Configuration</b>
 * because the Amazon Redshift {@code COPY} operation that reads from the S3
 * bucket doesn't support these compression formats.</li>
 * <li>We strongly recommend that the username and password provided is used
 * exclusively for Firehose purposes, and that the permissions for the
 * account are restricted for Amazon Redshift {@code INSERT}
 * permissions.</li>
 * </ul>
 * <p>
 * Firehose assumes the IAM role that is configured as part of destinations.
 * The IAM role should allow the Firehose principal to assume the role, and
 * the role should have permissions that allow the service to deliver the
 * data. For more information, see Amazon S3 Bucket Access in the Amazon
 * Kinesis Firehose Developer Guide.
 * </p>
 *
 * @param createDeliveryStreamRequest
 *        Contains the parameters for <b>CreateDeliveryStream</b>.
 * @return Result of the CreateDeliveryStream operation returned by the
 *         service.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws LimitExceededException
 *         You have already reached the limit for a requested resource.
 * @throws ResourceInUseException
 *         The resource is already in use and not available for this
 *         operation.
 * @sample AmazonKinesisFirehose.CreateDeliveryStream
 */
@Override
public CreateDeliveryStreamResult createDeliveryStream(
        CreateDeliveryStreamRequest createDeliveryStreamRequest) {
    ExecutionContext executionContext = createExecutionContext(createDeliveryStreamRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateDeliveryStreamRequest> request = null;
    Response<CreateDeliveryStreamResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateDeliveryStreamRequestMarshaller(
                    protocolFactory).marshall(super
                    .beforeMarshalling(createDeliveryStreamRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<CreateDeliveryStreamResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new CreateDeliveryStreamResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Deletes a delivery stream and its data.
 * </p>
 * <p>
 * You can delete a delivery stream only if it is in {@code ACTIVE} or
 * {@code DELETING} state, and not in the {@code CREATING} state. While the
 * deletion request is in process, the delivery stream is in the
 * {@code DELETING} state.
 * </p>
 * <p>
 * To check the state of a delivery stream, use
 * <b>DescribeDeliveryStream</b>.
 * </p>
 * <p>
 * While the delivery stream is in {@code DELETING} state, the service may
 * continue to accept the records, but the service doesn't make any
 * guarantees with respect to delivering the data. Therefore, as a best
 * practice, you should first stop any applications that are sending records
 * before deleting a delivery stream.
 * </p>
 *
 * @param deleteDeliveryStreamRequest
 *        Contains the parameters for <b>DeleteDeliveryStream</b>.
 * @return Result of the DeleteDeliveryStream operation returned by the
 *         service.
 * @throws ResourceInUseException
 *         The resource is already in use and not available for this
 *         operation.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @sample AmazonKinesisFirehose.DeleteDeliveryStream
 */
@Override
public DeleteDeliveryStreamResult deleteDeliveryStream(
        DeleteDeliveryStreamRequest deleteDeliveryStreamRequest) {
    ExecutionContext executionContext = createExecutionContext(deleteDeliveryStreamRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DeleteDeliveryStreamRequest> request = null;
    Response<DeleteDeliveryStreamResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new DeleteDeliveryStreamRequestMarshaller(
                    protocolFactory).marshall(super
                    .beforeMarshalling(deleteDeliveryStreamRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<DeleteDeliveryStreamResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new DeleteDeliveryStreamResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Describes the specified delivery stream and gets the status. For example,
 * after your delivery stream is created, call <b>DescribeDeliveryStream</b>
 * to see if the delivery stream is {@code ACTIVE} and therefore ready for
 * data to be sent to it.
 * </p>
 *
 * @param describeDeliveryStreamRequest
 *        Contains the parameters for <b>DescribeDeliveryStream</b>.
 * @return Result of the DescribeDeliveryStream operation returned by the
 *         service.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @sample AmazonKinesisFirehose.DescribeDeliveryStream
 */
@Override
public DescribeDeliveryStreamResult describeDeliveryStream(
        DescribeDeliveryStreamRequest describeDeliveryStreamRequest) {
    ExecutionContext executionContext = createExecutionContext(describeDeliveryStreamRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeDeliveryStreamRequest> request = null;
    Response<DescribeDeliveryStreamResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new DescribeDeliveryStreamRequestMarshaller(
                    protocolFactory).marshall(super
                    .beforeMarshalling(describeDeliveryStreamRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<DescribeDeliveryStreamResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new DescribeDeliveryStreamResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Lists your delivery streams.
 * </p>
 * <p>
 * The number of delivery streams might be too large to return using a
 * single call to <b>ListDeliveryStreams</b>. You can limit the number of
 * delivery streams returned, using the <b>Limit</b> parameter. To determine
 * whether there are more delivery streams to list, check the value of
 * <b>HasMoreDeliveryStreams</b> in the output. If there are more delivery
 * streams to list, you can request them by specifying the name of the last
 * delivery stream returned in the call in the
 * <b>ExclusiveStartDeliveryStreamName</b> parameter of a subsequent call.
 * </p>
 *
 * @param listDeliveryStreamsRequest
 *        Contains the parameters for <b>ListDeliveryStreams</b>.
 * @return Result of the ListDeliveryStreams operation returned by the
 *         service.
 * @sample AmazonKinesisFirehose.ListDeliveryStreams
 */
@Override
public ListDeliveryStreamsResult listDeliveryStreams(
        ListDeliveryStreamsRequest listDeliveryStreamsRequest) {
    ExecutionContext executionContext = createExecutionContext(listDeliveryStreamsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListDeliveryStreamsRequest> request = null;
    Response<ListDeliveryStreamsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListDeliveryStreamsRequestMarshaller(
                    protocolFactory).marshall(super
                    .beforeMarshalling(listDeliveryStreamsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<ListDeliveryStreamsResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new ListDeliveryStreamsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Writes a single data record into an Amazon Kinesis Firehose delivery
 * stream. To write multiple data records into a delivery stream, use
 * <b>PutRecordBatch</b>. Applications using these operations are referred
 * to as producers.
 * </p>
 * <p>
 * By default, each delivery stream can take in up to 2,000 transactions per
 * second, 5,000 records per second, or 5 MB per second. Note that if you
 * use <b>PutRecord</b> and <b>PutRecordBatch</b>, the limits are an
 * aggregate across these two operations for each delivery stream. For more
 * information about limits and how to request an increase, see Amazon
 * Kinesis Firehose Limits.
 * </p>
 * <p>
 * You must specify the name of the delivery stream and the data record when
 * using <b>PutRecord</b>. The data record consists of a data blob that can
 * be up to 1,000 KB in size, and any kind of data, for example, a segment
 * from a log file, geographic location data, web site clickstream data,
 * etc.
 * </p>
 * <p>
 * Firehose buffers records before delivering them to the destination. To
 * disambiguate the data blobs at the destination, a common solution is to
 * use delimiters in the data, such as a newline ({@code \n}) or some other
 * character unique within the data. This allows the consumer
 * application(s) to parse individual data items when reading the data from
 * the destination.
 * </p>
 * <p>
 * The <b>PutRecord</b> operation returns a <b>RecordId</b>, which is a
 * unique string assigned to each record. Producer applications can use this
 * ID for purposes such as auditability and investigation.
 * </p>
 * <p>
 * If the <b>PutRecord</b> operation throws a
 * <b>ServiceUnavailableException</b>, back off and retry. If the exception
 * persists, it is possible that the throughput limits have been exceeded
 * for the delivery stream.
 * </p>
 * <p>
 * Data records sent to Firehose are stored for 24 hours from the time they
 * are added to a delivery stream as it attempts to send the records to the
 * destination. If the destination is unreachable for more than 24 hours,
 * the data is no longer available.
 * </p>
 *
 * @param putRecordRequest
 *        Contains the parameters for <b>PutRecord</b>.
 * @return Result of the PutRecord operation returned by the service.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws ServiceUnavailableException
 *         The service is unavailable, back off and retry the operation. If
 *         you continue to see the exception, throughput limits for the
 *         delivery stream may have been exceeded. For more information
 *         about limits and how to request an increase, see Amazon Kinesis
 *         Firehose Limits.
 * @sample AmazonKinesisFirehose.PutRecord
 */
@Override
public PutRecordResult putRecord(PutRecordRequest putRecordRequest) {
    ExecutionContext executionContext = createExecutionContext(putRecordRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<PutRecordRequest> request = null;
    Response<PutRecordResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new PutRecordRequestMarshaller(protocolFactory)
                    .marshall(super.beforeMarshalling(putRecordRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<PutRecordResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new PutRecordResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Writes multiple data records into a delivery stream in a single call,
 * which can achieve higher throughput per producer than when writing single
 * records. To write single data records into a delivery stream, use
 * <b>PutRecord</b>. Applications using these operations are referred to as
 * producers.
 * </p>
 * <p>
 * Each <b>PutRecordBatch</b> request supports up to 500 records. Each
 * record in the request can be as large as 1,000 KB (before 64-bit
 * encoding), up to a limit of 4 MB for the entire request. By default, each
 * delivery stream can take in up to 2,000 transactions per second, 5,000
 * records per second, or 5 MB per second. Note that if you use
 * <b>PutRecord</b> and <b>PutRecordBatch</b>, the limits are an aggregate
 * across these two operations for each delivery stream. For more
 * information about limits and how to request an increase, see Amazon
 * Kinesis Firehose Limits.
 * </p>
 * <p>
 * You must specify the name of the delivery stream and the data record when
 * using <b>PutRecord</b>. The data record consists of a data blob that can
 * be up to 1,000 KB in size, and any kind of data, for example, a segment
 * from a log file, geographic location data, web site clickstream data, and
 * so on.
 * </p>
 * <p>
 * Firehose buffers records before delivering them to the destination. To
 * disambiguate the data blobs at the destination, a common solution is to
 * use delimiters in the data, such as a newline ({@code \n}) or some other
 * character unique within the data. This allows the consumer
 * application(s) to parse individual data items when reading the data from
 * the destination.
 * </p>
 * <p>
 * The <b>PutRecordBatch</b> response includes a count of any failed
 * records, <b>FailedPutCount</b>, and an array of responses,
 * <b>RequestResponses</b>. The <b>FailedPutCount</b> value is a count of
 * records that failed. Each entry in the <b>RequestResponses</b> array
 * gives additional information of the processed record. Each entry in
 * <b>RequestResponses</b> directly correlates with a record in the request
 * array using the same ordering, from the top to the bottom of the request
 * and response. <b>RequestResponses</b> always includes the same number of
 * records as the request array, covering both successfully and
 * unsuccessfully processed records. Firehose attempts to process all
 * records in each <b>PutRecordBatch</b> request. A single record failure
 * does not stop the processing of subsequent records.
 * </p>
 * <p>
 * A successfully processed record includes a <b>RecordId</b> value, which
 * is a unique value identified for the record. An unsuccessfully processed
 * record includes <b>ErrorCode</b> and <b>ErrorMessage</b> values.
 * <b>ErrorCode</b> reflects the type of error and is one of the following
 * values: {@code ServiceUnavailable} or {@code InternalFailure}.
 * {@code ErrorMessage} provides more detailed information about the error.
 * </p>
 * <p>
 * If <b>FailedPutCount</b> is greater than 0 (zero), retry the request. A
 * retry of the entire batch of records is possible; however, we strongly
 * recommend that you inspect the entire response and resend only those
 * records that failed processing. This minimizes duplicate records and also
 * reduces the total bytes sent (and corresponding charges).
 * </p>
 * <p>
 * If the <b>PutRecordBatch</b> operation throws a
 * <b>ServiceUnavailableException</b>, back off and retry. If the exception
 * persists, it is possible that the throughput limits have been exceeded
 * for the delivery stream.
 * </p>
 * <p>
 * Data records sent to Firehose are stored for 24 hours from the time they
 * are added to a delivery stream as it attempts to send the records to the
 * destination. If the destination is unreachable for more than 24 hours,
 * the data is no longer available.
 * </p>
 *
 * @param putRecordBatchRequest
 *        Contains the parameters for <b>PutRecordBatch</b>.
 * @return Result of the PutRecordBatch operation returned by the service.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws ServiceUnavailableException
 *         The service is unavailable, back off and retry the operation. If
 *         you continue to see the exception, throughput limits for the
 *         delivery stream may have been exceeded. For more information
 *         about limits and how to request an increase, see Amazon Kinesis
 *         Firehose Limits.
 * @sample AmazonKinesisFirehose.PutRecordBatch
 */
@Override
public PutRecordBatchResult putRecordBatch(
        PutRecordBatchRequest putRecordBatchRequest) {
    ExecutionContext executionContext = createExecutionContext(putRecordBatchRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<PutRecordBatchRequest> request = null;
    Response<PutRecordBatchResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new PutRecordBatchRequestMarshaller(protocolFactory)
                    .marshall(super
                            .beforeMarshalling(putRecordBatchRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<PutRecordBatchResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new PutRecordBatchResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Updates the specified destination of the specified delivery stream. Note:
 * Switching between Elasticsearch and other services is not supported. For
 * an Elasticsearch destination, you can only update an existing
 * Elasticsearch destination with this operation.
 * </p>
 * <p>
 * This operation can be used to change the destination type (for example,
 * to replace the Amazon S3 destination with Amazon Redshift) or change the
 * parameters associated with a given destination (for example, to change
 * the bucket name of the Amazon S3 destination). The update may not occur
 * immediately. The target delivery stream remains active while the
 * configurations are updated, so data writes to the delivery stream can
 * continue during this process. The updated configurations are normally
 * effective within a few minutes.
 * </p>
 * <p>
 * If the destination type is the same, Firehose merges the configuration
 * parameters specified in the <b>UpdateDestination</b> request with the
 * destination configuration that already exists on the delivery stream. If
 * any of the parameters are not specified in the update request, then the
 * existing configuration parameters are retained. For example, in the
 * Amazon S3 destination, if <b>EncryptionConfiguration</b> is not specified
 * then the existing <b>EncryptionConfiguration</b> is maintained on the
 * destination.
 * </p>
 * <p>
 * If the destination type is not the same, for example, changing the
 * destination from Amazon S3 to Amazon Redshift, Firehose does not merge
 * any parameters. In this case, all parameters must be specified.
 * </p>
 * <p>
 * Firehose uses the <b>CurrentDeliveryStreamVersionId</b> to avoid race
 * conditions and conflicting merges. This is a required field in every
 * request and the service only updates the configuration if the existing
 * configuration matches the <b>VersionId</b>. After the update is applied
 * successfully, the <b>VersionId</b> is updated, which can be retrieved
 * with the <b>DescribeDeliveryStream</b> operation. The new
 * <b>VersionId</b> should be used to set
 * <b>CurrentDeliveryStreamVersionId</b> in the next
 * <b>UpdateDestination</b> operation.
 * </p>
 *
 * @param updateDestinationRequest
 *        Contains the parameters for <b>UpdateDestination</b>.
 * @return Result of the UpdateDestination operation returned by the
 *         service.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws ResourceInUseException
 *         The resource is already in use and not available for this
 *         operation.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @throws ConcurrentModificationException
 *         Another modification has already happened. Fetch
 *         <b>VersionId</b> again and use it to update the destination.
 * @sample AmazonKinesisFirehose.UpdateDestination
 */
@Override
public UpdateDestinationResult updateDestination(
        UpdateDestinationRequest updateDestinationRequest) {
    ExecutionContext executionContext = createExecutionContext(updateDestinationRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateDestinationRequest> request = null;
    Response<UpdateDestinationResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new UpdateDestinationRequestMarshaller(
                    protocolFactory).marshall(super
                    .beforeMarshalling(updateDestinationRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        HttpResponseHandler<AmazonWebServiceResponse<UpdateDestinationResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata()
                        .withPayloadJson(true)
                        .withHasStreamingSuccessResponse(false),
                        new UpdateDestinationResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out the metrics event, even on failure.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * Returns additional metadata for a previously executed successful request,
 * typically used for debugging issues where a service isn't acting as
 * expected. This data isn't considered part of the result data returned by
 * an operation, so it's available through this separate, diagnostic
 * interface.
 * <p>
 * Response metadata is only cached for a limited period of time, so if you
 * need to access this extra diagnostic information for an executed request,
 * you should use this method to retrieve it as soon as possible after
 * executing the request.
 *
 * @param request
 *        The originally executed request
 *
 * @return The response metadata for the specified request, or null if none
 *         is available.
 */
public ResponseMetadata getCachedResponseMetadata(
AmazonWebServiceRequest request) {
return client.getResponseMetadataForRequest(request);
}
/**
* Normal invoke with authentication. Credentials are required and may be
* overriden at the request level.
**/
private Response invoke(
Request request,
HttpResponseHandler> responseHandler,
ExecutionContext executionContext) {
executionContext.setCredentialsProvider(CredentialUtils
.getCredentialsProvider(request.getOriginalRequest(),
awsCredentialsProvider));
return doInvoke(request, responseHandler, executionContext);
}
/**
* Invoke with no authentication. Credentials are not required and any
* credentials set on the client or request will be ignored for this
* operation.
**/
private Response anonymousInvoke(
Request request,
HttpResponseHandler> responseHandler,
ExecutionContext executionContext) {
return doInvoke(request, responseHandler, executionContext);
}
/**
* Invoke the request using the http client. Assumes credentials (or lack
* thereof) have been configured in the ExecutionContext beforehand.
**/
private Response doInvoke(
Request request,
HttpResponseHandler> responseHandler,
ExecutionContext executionContext) {
request.setEndpoint(endpoint);
request.setTimeOffset(timeOffset);
HttpResponseHandler errorResponseHandler = protocolFactory
.createErrorResponseHandler(new JsonErrorResponseMetadata());
return client.execute(request, responseHandler, errorResponseHandler,
executionContext);
}
}