
com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClient


The AWS Java SDK for Amazon Kinesis module holds the client classes that are used for communicating with Amazon Kinesis Service

/*
 * Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.kinesisfirehose;

import org.w3c.dom.*;

import java.net.*;
import java.util.*;

import javax.annotation.Generated;

import org.apache.commons.logging.*;

import com.amazonaws.*;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.auth.*;

import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;

import com.amazonaws.AmazonServiceException;

import com.amazonaws.services.kinesisfirehose.model.*;
import com.amazonaws.services.kinesisfirehose.model.transform.*;

/**
 * Client for accessing Firehose. All service calls made using this client are blocking, and will not return until the
 * service call completes.
 * <p>
 * <b>Amazon Kinesis Firehose API Reference</b>
 * </p>
 * <p>
 * Amazon Kinesis Firehose is a fully managed service that delivers real-time streaming data to destinations such as
 * Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.
 * </p>
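 * <p>
 * A minimal construction sketch (illustrative only; the region is a placeholder). Instances are normally obtained
 * through {@link AmazonKinesisFirehoseClientBuilder} rather than the deprecated constructors below:
 * </p>
 *
 * <pre>
 * {@code
 * AmazonKinesisFirehose firehose = AmazonKinesisFirehoseClientBuilder.standard()
 *         .withRegion("us-east-1")
 *         .build();
 * ListDeliveryStreamsResult streams = firehose.listDeliveryStreams(new ListDeliveryStreamsRequest());
 * System.out.println(streams.getDeliveryStreamNames());
 * }
 * </pre>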

 */
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonKinesisFirehoseClient extends AmazonWebServiceClient implements AmazonKinesisFirehose {

    /** Provider for AWS credentials. */
    private final AWSCredentialsProvider awsCredentialsProvider;

    private static final Log log = LogFactory.getLog(AmazonKinesisFirehose.class);

    /** Default signing name for the service. */
    private static final String DEFAULT_SIGNING_NAME = "firehose";

    /** Client configuration factory providing ClientConfigurations tailored to this client */
    protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();

    private static final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory = new com.amazonaws.protocol.json.SdkJsonProtocolFactory(
            new JsonClientMetadata()
                    .withProtocolVersion("1.1")
                    .withSupportsCbor(false)
                    .withSupportsIon(false)
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ConcurrentModificationException").withModeledClass(
                                    com.amazonaws.services.kinesisfirehose.model.ConcurrentModificationException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("InvalidArgumentException").withModeledClass(
                                    com.amazonaws.services.kinesisfirehose.model.InvalidArgumentException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ResourceInUseException").withModeledClass(
                                    com.amazonaws.services.kinesisfirehose.model.ResourceInUseException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ResourceNotFoundException").withModeledClass(
                                    com.amazonaws.services.kinesisfirehose.model.ResourceNotFoundException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ServiceUnavailableException").withModeledClass(
                                    com.amazonaws.services.kinesisfirehose.model.ServiceUnavailableException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("LimitExceededException").withModeledClass(
                                    com.amazonaws.services.kinesisfirehose.model.LimitExceededException.class))
                    .withBaseServiceExceptionClass(com.amazonaws.services.kinesisfirehose.model.AmazonKinesisFirehoseException.class));

    /**
     * Constructs a new client to invoke service methods on Firehose. A credentials provider chain will be used that
     * searches for credentials in this order:
     * <ul>
     * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li>
     * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li>
     * <li>Instance profile credentials delivered through the Amazon EC2 metadata service</li>
     * </ul>
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @see DefaultAWSCredentialsProviderChain
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#defaultClient()}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient() {
        this(DefaultAWSCredentialsProviderChain.getInstance(), configFactory.getConfig());
    }

    /**
     * Constructs a new client to invoke service methods on Firehose. A credentials provider chain will be used that
     * searches for credentials in this order:
     * <ul>
     * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li>
     * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li>
     * <li>Instance profile credentials delivered through the Amazon EC2 metadata service</li>
     * </ul>
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Firehose (ex: proxy settings,
     *        retry counts, etc.).
     *
     * @see DefaultAWSCredentialsProviderChain
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#withClientConfiguration(ClientConfiguration)}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient(ClientConfiguration clientConfiguration) {
        this(DefaultAWSCredentialsProviderChain.getInstance(), clientConfiguration);
    }

    /**
     * Constructs a new client to invoke service methods on Firehose using the specified AWS account credentials.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentials
     *        The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#withCredentials(AWSCredentialsProvider)} for example:
     *             {@code AmazonKinesisFirehoseClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(awsCredentials)).build();}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient(AWSCredentials awsCredentials) {
        this(awsCredentials, configFactory.getConfig());
    }

    /**
     * Constructs a new client to invoke service methods on Firehose using the specified AWS account credentials and
     * client configuration options.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentials
     *        The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Firehose (ex: proxy settings,
     *        retry counts, etc.).
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#withCredentials(AWSCredentialsProvider)} and
     *             {@link AmazonKinesisFirehoseClientBuilder#withClientConfiguration(ClientConfiguration)}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
        super(clientConfiguration);
        this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
        init();
    }

    /**
     * Constructs a new client to invoke service methods on Firehose using the specified AWS account credentials
     * provider.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#withCredentials(AWSCredentialsProvider)}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient(AWSCredentialsProvider awsCredentialsProvider) {
        this(awsCredentialsProvider, configFactory.getConfig());
    }

    /**
     * Constructs a new client to invoke service methods on Firehose using the specified AWS account credentials
     * provider and client configuration options.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Firehose (ex: proxy settings,
     *        retry counts, etc.).
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#withCredentials(AWSCredentialsProvider)} and
     *             {@link AmazonKinesisFirehoseClientBuilder#withClientConfiguration(ClientConfiguration)}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
        this(awsCredentialsProvider, clientConfiguration, null);
    }

    /**
     * Constructs a new client to invoke service methods on Firehose using the specified AWS account credentials
     * provider, client configuration options, and request metric collector.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Firehose (ex: proxy settings,
     *        retry counts, etc.).
     * @param requestMetricCollector
     *        optional request metric collector
     * @deprecated use {@link AmazonKinesisFirehoseClientBuilder#withCredentials(AWSCredentialsProvider)} and
     *             {@link AmazonKinesisFirehoseClientBuilder#withClientConfiguration(ClientConfiguration)} and
     *             {@link AmazonKinesisFirehoseClientBuilder#withMetricsCollector(RequestMetricCollector)}
     */
    @Deprecated
    public AmazonKinesisFirehoseClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration,
            RequestMetricCollector requestMetricCollector) {
        super(clientConfiguration, requestMetricCollector);
        this.awsCredentialsProvider = awsCredentialsProvider;
        init();
    }

    public static AmazonKinesisFirehoseClientBuilder builder() {
        return AmazonKinesisFirehoseClientBuilder.standard();
    }

    /**
     * Constructs a new client to invoke service methods on Firehose using the specified parameters.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param clientParams
     *        Object providing client parameters.
     */
    AmazonKinesisFirehoseClient(AwsSyncClientParams clientParams) {
        super(clientParams);
        this.awsCredentialsProvider = clientParams.getCredentialsProvider();
        init();
    }

    private void init() {
        setServiceNameIntern(DEFAULT_SIGNING_NAME);
        setEndpointPrefix(ENDPOINT_PREFIX);
        // calling this.setEndPoint(...) will also modify the signer accordingly
        setEndpoint("https://firehose.us-east-1.amazonaws.com");
        HandlerChainFactory chainFactory = new HandlerChainFactory();
        requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/kinesisfirehose/request.handlers"));
        requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/kinesisfirehose/request.handler2s"));
        requestHandler2s.addAll(chainFactory.getGlobalHandlers());
    }

    /**
     * <p>
     * Creates a delivery stream.
     * </p>
     * <p>
     * By default, you can create up to 20 delivery streams per region.
     * </p>
     * <p>
     * This is an asynchronous operation that immediately returns. The initial status of the delivery stream is
     * CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send
     * data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery
     * stream, use DescribeDeliveryStream.
     * </p>
     * <p>
     * A Kinesis Firehose delivery stream can be configured to receive records directly from providers using PutRecord
     * or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis
     * stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream
     * ARN and role ARN in the KinesisStreamSourceConfiguration parameter.
     * </p>
     * <p>
     * A delivery stream is configured with a single destination: Amazon S3, Amazon ES, or Amazon Redshift. You must
     * specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration,
     * S3DestinationConfiguration, ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration.
     * </p>
     * <p>
     * When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints,
     * EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis
     * Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. Note that
     * BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly;
     * for example, record boundaries are such that the size is a little over or under the configured buffering size.
     * By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data
     * storage in Amazon S3.
     * </p>
     * <p>
     * A few notes about Amazon Redshift as a destination:
     * </p>
     * <ul>
     * <li>
     * An Amazon Redshift destination requires an S3 bucket as an intermediate location, as Kinesis Firehose first
     * delivers data to S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in
     * the RedshiftDestinationConfiguration.S3Configuration parameter.
     * </li>
     * <li>
     * The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration
     * because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression
     * formats.
     * </li>
     * <li>
     * We strongly recommend that you use the user name and password you provide exclusively with Kinesis Firehose, and
     * that the permissions for the account are restricted for Amazon Redshift INSERT permissions.
     * </li>
     * </ul>
     * <p>
     * Kinesis Firehose assumes the IAM role that is configured as part of the destination. The role should allow the
     * Kinesis Firehose principal to assume the role, and the role should have permissions that allow the service to
     * deliver the data. For more information, see Amazon S3 Bucket Access in the Amazon Kinesis Firehose Developer
     * Guide.
     * </p>
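     * <p>
     * A minimal, illustrative request sketch, given an AmazonKinesisFirehose client {@code firehose}; the stream name,
     * bucket ARN, and role ARN are placeholders, and a real call needs an IAM role that Kinesis Firehose can assume:
     * </p>
     *
     * <pre>
     * {@code
     * CreateDeliveryStreamResult result = firehose.createDeliveryStream(new CreateDeliveryStreamRequest()
     *         .withDeliveryStreamName("example-stream")
     *         .withDeliveryStreamType("DirectPut")
     *         .withExtendedS3DestinationConfiguration(new ExtendedS3DestinationConfiguration()
     *                 .withBucketARN("arn:aws:s3:::example-bucket")
     *                 .withRoleARN("arn:aws:iam::111122223333:role/example-firehose-role")));
     * }
     * </pre>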

     *
     * @param createDeliveryStreamRequest
     * @return Result of the CreateDeliveryStream operation returned by the service.
     * @throws InvalidArgumentException
     *         The specified input parameter has a value that is not valid.
     * @throws LimitExceededException
     *         You have already reached the limit for a requested resource.
     * @throws ResourceInUseException
     *         The resource is already in use and not available for this operation.
     * @sample AmazonKinesisFirehose.CreateDeliveryStream
     * @see AWS API Documentation
     */
    @Override
    public CreateDeliveryStreamResult createDeliveryStream(CreateDeliveryStreamRequest request) {
        request = beforeClientExecution(request);
        return executeCreateDeliveryStream(request);
    }

    @SdkInternalApi
    final CreateDeliveryStreamResult executeCreateDeliveryStream(CreateDeliveryStreamRequest createDeliveryStreamRequest) {

        ExecutionContext executionContext = createExecutionContext(createDeliveryStreamRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDeliveryStreamRequest> request = null;
        Response<CreateDeliveryStreamResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateDeliveryStreamRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createDeliveryStreamRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateDeliveryStreamResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateDeliveryStreamResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Deletes a delivery stream and its data.
     * </p>
     * <p>
     * You can delete a delivery stream only if it is in the ACTIVE or DELETING state, and not in the CREATING state.
     * While the deletion request is in process, the delivery stream is in the DELETING state.
     * </p>
     * <p>
     * To check the state of a delivery stream, use DescribeDeliveryStream.
     * </p>
     * <p>
     * While the delivery stream is in the DELETING state, the service may continue to accept the records, but it
     * doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first
     * stop any applications that are sending records before deleting a delivery stream.
     * </p>
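     * <p>
     * A minimal, illustrative call, given an AmazonKinesisFirehose client {@code firehose}; the stream name is a
     * placeholder, and the stream is assumed to already be in the ACTIVE state:
     * </p>
     *
     * <pre>
     * {@code
     * firehose.deleteDeliveryStream(new DeleteDeliveryStreamRequest().withDeliveryStreamName("example-stream"));
     * }
     * </pre>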

     *
     * @param deleteDeliveryStreamRequest
     * @return Result of the DeleteDeliveryStream operation returned by the service.
     * @throws ResourceInUseException
     *         The resource is already in use and not available for this operation.
     * @throws ResourceNotFoundException
     *         The specified resource could not be found.
     * @sample AmazonKinesisFirehose.DeleteDeliveryStream
     * @see AWS API Documentation
     */
    @Override
    public DeleteDeliveryStreamResult deleteDeliveryStream(DeleteDeliveryStreamRequest request) {
        request = beforeClientExecution(request);
        return executeDeleteDeliveryStream(request);
    }

    @SdkInternalApi
    final DeleteDeliveryStreamResult executeDeleteDeliveryStream(DeleteDeliveryStreamRequest deleteDeliveryStreamRequest) {

        ExecutionContext executionContext = createExecutionContext(deleteDeliveryStreamRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteDeliveryStreamRequest> request = null;
        Response<DeleteDeliveryStreamResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteDeliveryStreamRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteDeliveryStreamRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DeleteDeliveryStreamResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteDeliveryStreamResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Describes the specified delivery stream and gets the status. For example, after your delivery stream is created,
     * call DescribeDeliveryStream to see if the delivery stream is ACTIVE and therefore ready for data to be sent to
     * it.
     * </p>
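     * <p>
     * An illustrative status check, given an AmazonKinesisFirehose client {@code firehose}; the stream name is a
     * placeholder, and a production poller would also bound and space out its retries:
     * </p>
     *
     * <pre>
     * {@code
     * DescribeDeliveryStreamResult describeResult = firehose.describeDeliveryStream(
     *         new DescribeDeliveryStreamRequest().withDeliveryStreamName("example-stream"));
     * String status = describeResult.getDeliveryStreamDescription().getDeliveryStreamStatus();
     * boolean ready = "ACTIVE".equals(status);
     * }
     * </pre>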

     *
     * @param describeDeliveryStreamRequest
     * @return Result of the DescribeDeliveryStream operation returned by the service.
     * @throws ResourceNotFoundException
     *         The specified resource could not be found.
     * @sample AmazonKinesisFirehose.DescribeDeliveryStream
     * @see AWS API Documentation
     */
    @Override
    public DescribeDeliveryStreamResult describeDeliveryStream(DescribeDeliveryStreamRequest request) {
        request = beforeClientExecution(request);
        return executeDescribeDeliveryStream(request);
    }

    @SdkInternalApi
    final DescribeDeliveryStreamResult executeDescribeDeliveryStream(DescribeDeliveryStreamRequest describeDeliveryStreamRequest) {

        ExecutionContext executionContext = createExecutionContext(describeDeliveryStreamRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeDeliveryStreamRequest> request = null;
        Response<DescribeDeliveryStreamResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeDeliveryStreamRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeDeliveryStreamRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DescribeDeliveryStreamResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeDeliveryStreamResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Lists your delivery streams.
     * </p>
     * <p>
     * The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can
     * limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more
     * delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery
     * streams to list, you can request them by specifying the name of the last delivery stream returned in the call in
     * the ExclusiveStartDeliveryStreamName parameter of a subsequent call.
     * </p>
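     * <p>
     * An illustrative pagination loop that follows the HasMoreDeliveryStreams / ExclusiveStartDeliveryStreamName
     * contract described above, given an AmazonKinesisFirehose client {@code firehose}; the page size of 10 is
     * arbitrary:
     * </p>
     *
     * <pre>
     * {@code
     * List<String> allStreams = new ArrayList<String>();
     * ListDeliveryStreamsRequest listRequest = new ListDeliveryStreamsRequest().withLimit(10);
     * ListDeliveryStreamsResult listResult;
     * do {
     *     listResult = firehose.listDeliveryStreams(listRequest);
     *     allStreams.addAll(listResult.getDeliveryStreamNames());
     *     if (!allStreams.isEmpty()) {
     *         listRequest.setExclusiveStartDeliveryStreamName(allStreams.get(allStreams.size() - 1));
     *     }
     * } while (Boolean.TRUE.equals(listResult.getHasMoreDeliveryStreams()));
     * }
     * </pre>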

     *
     * @param listDeliveryStreamsRequest
     * @return Result of the ListDeliveryStreams operation returned by the service.
     * @sample AmazonKinesisFirehose.ListDeliveryStreams
     * @see AWS API Documentation
     */
    @Override
    public ListDeliveryStreamsResult listDeliveryStreams(ListDeliveryStreamsRequest request) {
        request = beforeClientExecution(request);
        return executeListDeliveryStreams(request);
    }

    @SdkInternalApi
    final ListDeliveryStreamsResult executeListDeliveryStreams(ListDeliveryStreamsRequest listDeliveryStreamsRequest) {

        ExecutionContext executionContext = createExecutionContext(listDeliveryStreamsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<ListDeliveryStreamsRequest> request = null;
        Response<ListDeliveryStreamsResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new ListDeliveryStreamsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listDeliveryStreamsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<ListDeliveryStreamsResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListDeliveryStreamsResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into
     * a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
     * </p>
     * <p>
     * By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5
     * MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two
     * operations for each delivery stream. For more information about limits and how to request an increase, see
     * Amazon Kinesis Firehose Limits.
     * </p>
     * <p>
     * You must specify the name of the delivery stream and the data record when using PutRecord. The data record
     * consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a
     * log file, geographic location data, website clickstream data, and so on.
     * </p>
     * <p>
     * Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at
     * the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other
     * character unique within the data. This allows the consumer application to parse individual data items when
     * reading the data from the destination.
     * </p>
     * <p>
     * The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer
     * applications can use this ID for purposes such as auditability and investigation.
     * </p>
     * <p>
     * If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists,
     * it is possible that the throughput limits have been exceeded for the delivery stream.
     * </p>
     * <p>
     * Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream
     * as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours,
     * the data is no longer available.
     * </p>
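     * <p>
     * A minimal, illustrative single-record write, given an AmazonKinesisFirehose client {@code firehose}; the stream
     * name and payload are placeholders, and the trailing newline follows the delimiter recommendation above:
     * </p>
     *
     * <pre>
     * {@code
     * String payload = "{\"event\":\"click\",\"page\":\"/home\"}\n";
     * PutRecordResult putResult = firehose.putRecord(new PutRecordRequest()
     *         .withDeliveryStreamName("example-stream")
     *         .withRecord(new Record().withData(
     *                 java.nio.ByteBuffer.wrap(payload.getBytes(java.nio.charset.StandardCharsets.UTF_8)))));
     * String recordId = putResult.getRecordId();
     * }
     * </pre>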

     *
     * @param putRecordRequest
     * @return Result of the PutRecord operation returned by the service.
     * @throws ResourceNotFoundException
     *         The specified resource could not be found.
     * @throws InvalidArgumentException
     *         The specified input parameter has a value that is not valid.
     * @throws ServiceUnavailableException
     *         The service is unavailable, back off and retry the operation. If you continue to see the exception,
     *         throughput limits for the delivery stream may have been exceeded. For more information about limits and
     *         how to request an increase, see Amazon Kinesis Firehose Limits.
     * @sample AmazonKinesisFirehose.PutRecord
     * @see AWS API Documentation
     */
    @Override
    public PutRecordResult putRecord(PutRecordRequest request) {
        request = beforeClientExecution(request);
        return executePutRecord(request);
    }

    @SdkInternalApi
    final PutRecordResult executePutRecord(PutRecordRequest putRecordRequest) {

        ExecutionContext executionContext = createExecutionContext(putRecordRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutRecordRequest> request = null;
        Response<PutRecordResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PutRecordRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(putRecordRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<PutRecordResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new PutRecordResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per
     * producer than when writing single records. To write single data records into a delivery stream, use PutRecord.
     * Applications using these operations are referred to as producers.
     * </p>
     * <p>
     * By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5
     * MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations
     * for each delivery stream. For more information about limits, see Amazon Kinesis Firehose Limits.
     * </p>
     * <p>
     * Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB
     * (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
     * </p>
     * <p>
     * You must specify the name of the delivery stream and the data record when using PutRecordBatch. The data record
     * consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a
     * segment from a log file, geographic location data, website clickstream data, and so on.
     * </p>
     * <p>
     * Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at
     * the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other
     * character unique within the data. This allows the consumer application to parse individual data items when
     * reading the data from the destination.
     * </p>
     * <p>
     * The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses,
     * RequestResponses. Each entry in the RequestResponses array provides additional information about the processed
     * record. It directly correlates with a record in the request array using the same ordering, from the top to the
     * bottom. The response array always includes the same number of records as the request array. RequestResponses
     * includes both successfully and unsuccessfully processed records. Kinesis Firehose attempts to process all
     * records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent
     * records.
     * </p>
     * <p>
     * A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully
     * processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of
     * the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information
     * about the error.
     * </p>
     * <p>
     * If there is an internal server error or a timeout, the write might have completed or it might have failed. If
     * FailedPutCount is greater than 0, retry the request, resending only those records that might have failed
     * processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and
     * corresponding charges). We recommend that you handle any duplicates at the destination.
     * </p>
     * <p>
     * If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is
     * possible that the throughput limits have been exceeded for the delivery stream.
     * </p>
     * <p>
     * Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream
     * as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours,
     * the data is no longer available.
     * </p>
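     * <p>
     * An illustrative batch write that follows the retry guidance above, given an AmazonKinesisFirehose client
     * {@code firehose}: entries that report an error are collected and resent. The stream name and payloads are
     * placeholders, and a real producer would also back off between attempts and bound the number of retries:
     * </p>
     *
     * <pre>
     * {@code
     * List<Record> records = new ArrayList<Record>();
     * for (String line : java.util.Arrays.asList("first\n", "second\n", "third\n")) {
     *     records.add(new Record().withData(
     *             java.nio.ByteBuffer.wrap(line.getBytes(java.nio.charset.StandardCharsets.UTF_8))));
     * }
     * PutRecordBatchResult batchResult = firehose.putRecordBatch(
     *         new PutRecordBatchRequest().withDeliveryStreamName("example-stream").withRecords(records));
     * if (batchResult.getFailedPutCount() > 0) {
     *     List<Record> retries = new ArrayList<Record>();
     *     List<PutRecordBatchResponseEntry> entries = batchResult.getRequestResponses();
     *     for (int i = 0; i < entries.size(); i++) {
     *         if (entries.get(i).getErrorCode() != null) {
     *             retries.add(records.get(i));
     *         }
     *     }
     *     firehose.putRecordBatch(new PutRecordBatchRequest().withDeliveryStreamName("example-stream").withRecords(retries));
     * }
     * }
     * </pre>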

     *
     * @param putRecordBatchRequest
     * @return Result of the PutRecordBatch operation returned by the service.
     * @throws ResourceNotFoundException
     *         The specified resource could not be found.
     * @throws InvalidArgumentException
     *         The specified input parameter has a value that is not valid.
     * @throws ServiceUnavailableException
     *         The service is unavailable, back off and retry the operation. If you continue to see the exception,
     *         throughput limits for the delivery stream may have been exceeded. For more information about limits and
     *         how to request an increase, see Amazon Kinesis Firehose Limits.
     * @sample AmazonKinesisFirehose.PutRecordBatch
     * @see AWS API Documentation
     */
    @Override
    public PutRecordBatchResult putRecordBatch(PutRecordBatchRequest request) {
        request = beforeClientExecution(request);
        return executePutRecordBatch(request);
    }

    @SdkInternalApi
    final PutRecordBatchResult executePutRecordBatch(PutRecordBatchRequest putRecordBatchRequest) {

        ExecutionContext executionContext = createExecutionContext(putRecordBatchRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PutRecordBatchRequest> request = null;
        Response<PutRecordBatchResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PutRecordBatchRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(putRecordBatchRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<PutRecordBatchResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new PutRecordBatchResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Updates the specified destination of the specified delivery stream.
     * </p>
     * <p>
     * You can use this operation to change the destination type (for example, to replace the Amazon S3 destination
     * with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket
     * name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains
     * active while the configurations are updated, so data writes to the delivery stream can continue during this
     * process. The updated configurations are usually effective within a few minutes.
     * </p>
     * <p>
     * Note that switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can
     * only update to another Amazon ES destination.
     * </p>
     * <p>
     * If the destination type is the same, Kinesis Firehose merges the configuration parameters specified with the
     * destination configuration that already exists on the delivery stream. If any of the parameters are not specified
     * in the call, the existing values are retained. For example, in the Amazon S3 destination, if
     * EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the
     * destination.
     * </p>
     * <p>
     * If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon
     * Redshift, Kinesis Firehose does not merge any parameters. In this case, all parameters must be specified.
     * </p>
     * <p>
     * Kinesis Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a
     * required field, and the service updates the configuration only if the existing configuration has a version ID
     * that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using
     * DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
     * </p>
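     * <p>
     * An illustrative update that first fetches the current version ID and destination ID with
     * DescribeDeliveryStream, as described above, and then changes the S3 buffering hints. The client {@code firehose},
     * the stream name, and the buffering values are placeholders:
     * </p>
     *
     * <pre>
     * {@code
     * DeliveryStreamDescription description = firehose.describeDeliveryStream(
     *         new DescribeDeliveryStreamRequest().withDeliveryStreamName("example-stream")).getDeliveryStreamDescription();
     * firehose.updateDestination(new UpdateDestinationRequest()
     *         .withDeliveryStreamName("example-stream")
     *         .withCurrentDeliveryStreamVersionId(description.getVersionId())
     *         .withDestinationId(description.getDestinations().get(0).getDestinationId())
     *         .withExtendedS3DestinationUpdate(new ExtendedS3DestinationUpdate()
     *                 .withBufferingHints(new BufferingHints().withIntervalInSeconds(120).withSizeInMBs(10))));
     * }
     * </pre>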

     *
     * @param updateDestinationRequest
     * @return Result of the UpdateDestination operation returned by the service.
     * @throws InvalidArgumentException
     *         The specified input parameter has a value that is not valid.
     * @throws ResourceInUseException
     *         The resource is already in use and not available for this operation.
     * @throws ResourceNotFoundException
     *         The specified resource could not be found.
     * @throws ConcurrentModificationException
     *         Another modification has already happened. Fetch VersionId again and use it to update the destination.
     * @sample AmazonKinesisFirehose.UpdateDestination
     * @see AWS API Documentation
     */
    @Override
    public UpdateDestinationResult updateDestination(UpdateDestinationRequest request) {
        request = beforeClientExecution(request);
        return executeUpdateDestination(request);
    }

    @SdkInternalApi
    final UpdateDestinationResult executeUpdateDestination(UpdateDestinationRequest updateDestinationRequest) {

        ExecutionContext executionContext = createExecutionContext(updateDestinationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<UpdateDestinationRequest> request = null;
        Response<UpdateDestinationResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new UpdateDestinationRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateDestinationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
                request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<UpdateDestinationResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new UpdateDestinationResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * Returns additional metadata for a previously executed successful request, typically used for debugging issues
     * where a service isn't acting as expected. This data isn't considered part of the result data returned by an
     * operation, so it's available through this separate, diagnostic interface.

     * <p>
     * Response metadata is only cached for a limited period of time, so if you need to access this extra diagnostic
     * information for an executed request, you should use this method to retrieve it as soon as possible after
     * executing the request.
     *
     * @param request
     *        The originally executed request
     *
     * @return The response metadata for the specified request, or null if none is available.
     */
    public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
        return client.getResponseMetadataForRequest(request);
    }

    /**
     * Normal invoke with authentication. Credentials are required and may be overridden at the request level.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) {

        executionContext.setCredentialsProvider(CredentialUtils.getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider));

        return doInvoke(request, responseHandler, executionContext);
    }

    /**
     * Invoke with no authentication. Credentials are not required and any credentials set on the client or request
     * will be ignored for this operation.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> anonymousInvoke(Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) {

        return doInvoke(request, responseHandler, executionContext);
    }

    /**
     * Invoke the request using the http client. Assumes credentials (or lack thereof) have been configured in the
     * ExecutionContext beforehand.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> doInvoke(Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) {
        request.setEndpoint(endpoint);
        request.setTimeOffset(timeOffset);

        HttpResponseHandler<AmazonServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(new JsonErrorResponseMetadata());

        return client.execute(request, responseHandler, errorResponseHandler, executionContext);
    }

    @com.amazonaws.annotation.SdkInternalApi
    static com.amazonaws.protocol.json.SdkJsonProtocolFactory getProtocolFactory() {
        return protocolFactory;
    }

}




