// NOTE(review): the two lines below were page-header residue from a javadoc/Maven artifact
// scrape ("software.amazon.awssdk.services.firehose.FirehoseClient Maven / Gradle / Ivy",
// "Show all versions of kinesis Show documentation") and were not valid Java; kept as a comment.
/*
* Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package software.amazon.awssdk.services.firehose;
import java.util.function.Consumer;
import javax.annotation.Generated;
import software.amazon.awssdk.core.SdkClient;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.core.exception.SdkServiceException;
import software.amazon.awssdk.core.regions.ServiceMetadata;
import software.amazon.awssdk.services.firehose.model.ConcurrentModificationException;
import software.amazon.awssdk.services.firehose.model.CreateDeliveryStreamRequest;
import software.amazon.awssdk.services.firehose.model.CreateDeliveryStreamResponse;
import software.amazon.awssdk.services.firehose.model.DeleteDeliveryStreamRequest;
import software.amazon.awssdk.services.firehose.model.DeleteDeliveryStreamResponse;
import software.amazon.awssdk.services.firehose.model.DescribeDeliveryStreamRequest;
import software.amazon.awssdk.services.firehose.model.DescribeDeliveryStreamResponse;
import software.amazon.awssdk.services.firehose.model.FirehoseException;
import software.amazon.awssdk.services.firehose.model.InvalidArgumentException;
import software.amazon.awssdk.services.firehose.model.LimitExceededException;
import software.amazon.awssdk.services.firehose.model.ListDeliveryStreamsRequest;
import software.amazon.awssdk.services.firehose.model.ListDeliveryStreamsResponse;
import software.amazon.awssdk.services.firehose.model.PutRecordBatchRequest;
import software.amazon.awssdk.services.firehose.model.PutRecordBatchResponse;
import software.amazon.awssdk.services.firehose.model.PutRecordRequest;
import software.amazon.awssdk.services.firehose.model.PutRecordResponse;
import software.amazon.awssdk.services.firehose.model.ResourceInUseException;
import software.amazon.awssdk.services.firehose.model.ResourceNotFoundException;
import software.amazon.awssdk.services.firehose.model.ServiceUnavailableException;
import software.amazon.awssdk.services.firehose.model.UpdateDestinationRequest;
import software.amazon.awssdk.services.firehose.model.UpdateDestinationResponse;
import software.amazon.awssdk.utils.SdkAutoCloseable;
/**
 * Service client for accessing Firehose. This can be created using the static {@link #builder()} method.
 *
 * <p>
 * <b>Amazon Kinesis Firehose API Reference</b>
 * </p>
 * <p>
 * Amazon Kinesis Firehose is a fully managed service that delivers real-time streaming data to destinations such as
 * Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.
 * </p>
 */
@Generated("software.amazon.awssdk:codegen")
public interface FirehoseClient extends SdkClient, SdkAutoCloseable {
String SERVICE_NAME = "firehose";
/**
* Create a {@link FirehoseClient} with the region loaded from the
* {@link software.amazon.awssdk.core.regions.providers.DefaultAwsRegionProviderChain} and credentials loaded from
* the {@link software.amazon.awssdk.core.auth.DefaultCredentialsProvider}.
*/
static FirehoseClient create() {
return builder().build();
}
/**
* Create a builder that can be used to configure and create a {@link FirehoseClient}.
*/
static FirehoseClientBuilder builder() {
return new DefaultFirehoseClientBuilder();
}
/**
*
* Creates a delivery stream.
*
*
* By default, you can create up to 20 delivery streams per region.
*
*
* This is an asynchronous operation that immediately returns. The initial status of the delivery stream is
* CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts
* data. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception.
* To check the state of a delivery stream, use DescribeDeliveryStream.
*
*
* A Kinesis Firehose delivery stream can be configured to receive records directly from providers using
* PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its
* source. To specify a Kinesis stream as input, set the DeliveryStreamType
parameter to
* KinesisStreamAsSource
, and provide the Kinesis stream ARN and role ARN in the
* KinesisStreamSourceConfiguration
parameter.
*
*
* A delivery stream is configured with a single destination: Amazon S3, Amazon ES, or Amazon Redshift. You must
* specify only one of the following destination configuration parameters:
* ExtendedS3DestinationConfiguration, S3DestinationConfiguration,
* ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration.
*
*
* When you specify S3DestinationConfiguration, you can also provide the following optional values:
* BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no
* BufferingHints value is provided, Kinesis Firehose buffers data up to 5 MB or for 5 minutes, whichever
* condition is satisfied first. Note that BufferingHints is a hint, so there are some cases where the
* service cannot adhere to these conditions strictly; for example, record boundaries are such that the size is a
* little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend
* that you enable encryption to ensure secure data storage in Amazon S3.
*
*
* A few notes about Amazon Redshift as a destination:
*
*
* -
*
* An Amazon Redshift destination requires an S3 bucket as intermediate location, as Kinesis Firehose first delivers
* data to S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified
* in the RedshiftDestinationConfiguration.S3Configuration parameter.
*
*
* -
*
* The compression formats SNAPPY
or ZIP
cannot be specified in
* RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY
operation
* that reads from the S3 bucket doesn't support these compression formats.
*
*
* -
*
* We strongly recommend that you use the user name and password you provide exclusively with Kinesis Firehose, and
* that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
*
*
*
*
* Kinesis Firehose assumes the IAM role that is configured as part of the destination. The role should allow the
* Kinesis Firehose principal to assume the role, and the role should have permissions that allow the service to
* deliver the data. For more information, see Amazon S3 Bucket
* Access in the Amazon Kinesis Firehose Developer Guide.
*
*
* @param createDeliveryStreamRequest
* @return Result of the CreateDeliveryStream operation returned by the service.
* @throws InvalidArgumentException
* The specified input parameter has a value that is not valid.
* @throws LimitExceededException
* You have already reached the limit for a requested resource.
* @throws ResourceInUseException
* The resource is already in use and not available for this operation.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.CreateDeliveryStream
* @see AWS
* API Documentation
*/
default CreateDeliveryStreamResponse createDeliveryStream(CreateDeliveryStreamRequest createDeliveryStreamRequest)
throws InvalidArgumentException, LimitExceededException, ResourceInUseException, SdkServiceException,
SdkClientException, FirehoseException {
throw new UnsupportedOperationException();
}
/**
*
* Creates a delivery stream.
*
*
* By default, you can create up to 20 delivery streams per region.
*
*
* This is an asynchronous operation that immediately returns. The initial status of the delivery stream is
* CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts
* data. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception.
* To check the state of a delivery stream, use DescribeDeliveryStream.
*
*
* A Kinesis Firehose delivery stream can be configured to receive records directly from providers using
* PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its
* source. To specify a Kinesis stream as input, set the DeliveryStreamType
parameter to
* KinesisStreamAsSource
, and provide the Kinesis stream ARN and role ARN in the
* KinesisStreamSourceConfiguration
parameter.
*
*
* A delivery stream is configured with a single destination: Amazon S3, Amazon ES, or Amazon Redshift. You must
* specify only one of the following destination configuration parameters:
* ExtendedS3DestinationConfiguration, S3DestinationConfiguration,
* ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration.
*
*
* When you specify S3DestinationConfiguration, you can also provide the following optional values:
* BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no
* BufferingHints value is provided, Kinesis Firehose buffers data up to 5 MB or for 5 minutes, whichever
* condition is satisfied first. Note that BufferingHints is a hint, so there are some cases where the
* service cannot adhere to these conditions strictly; for example, record boundaries are such that the size is a
* little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend
* that you enable encryption to ensure secure data storage in Amazon S3.
*
*
* A few notes about Amazon Redshift as a destination:
*
*
* -
*
* An Amazon Redshift destination requires an S3 bucket as intermediate location, as Kinesis Firehose first delivers
* data to S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified
* in the RedshiftDestinationConfiguration.S3Configuration parameter.
*
*
* -
*
* The compression formats SNAPPY
or ZIP
cannot be specified in
* RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY
operation
* that reads from the S3 bucket doesn't support these compression formats.
*
*
* -
*
* We strongly recommend that you use the user name and password you provide exclusively with Kinesis Firehose, and
* that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
*
*
*
*
* Kinesis Firehose assumes the IAM role that is configured as part of the destination. The role should allow the
* Kinesis Firehose principal to assume the role, and the role should have permissions that allow the service to
* deliver the data. For more information, see Amazon S3 Bucket
* Access in the Amazon Kinesis Firehose Developer Guide.
*
*
* @param createDeliveryStreamRequest
* @return Result of the CreateDeliveryStream operation returned by the service.
* @throws InvalidArgumentException
* The specified input parameter has a value that is not valid.
* @throws LimitExceededException
* You have already reached the limit for a requested resource.
* @throws ResourceInUseException
* The resource is already in use and not available for this operation.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.CreateDeliveryStream
* @see AWS
* API Documentation
*/
default CreateDeliveryStreamResponse createDeliveryStream(
Consumer createDeliveryStreamRequest) throws InvalidArgumentException,
LimitExceededException, ResourceInUseException, SdkServiceException, SdkClientException, FirehoseException {
return createDeliveryStream(CreateDeliveryStreamRequest.builder().apply(createDeliveryStreamRequest).build());
}
/**
*
* Deletes a delivery stream and its data.
*
*
* You can delete a delivery stream only if it is in ACTIVE
or DELETING
state, and not in
* the CREATING
state. While the deletion request is in process, the delivery stream is in the
* DELETING
state.
*
*
* To check the state of a delivery stream, use DescribeDeliveryStream.
*
*
* While the delivery stream is DELETING
state, the service may continue to accept the records, but the
* service doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you
* should first stop any applications that are sending records before deleting a delivery stream.
*
*
* @param deleteDeliveryStreamRequest
* @return Result of the DeleteDeliveryStream operation returned by the service.
* @throws ResourceInUseException
* The resource is already in use and not available for this operation.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.DeleteDeliveryStream
* @see AWS
* API Documentation
*/
default DeleteDeliveryStreamResponse deleteDeliveryStream(DeleteDeliveryStreamRequest deleteDeliveryStreamRequest)
throws ResourceInUseException, ResourceNotFoundException, SdkServiceException, SdkClientException, FirehoseException {
throw new UnsupportedOperationException();
}
/**
*
* Deletes a delivery stream and its data.
*
*
* You can delete a delivery stream only if it is in ACTIVE
or DELETING
state, and not in
* the CREATING
state. While the deletion request is in process, the delivery stream is in the
* DELETING
state.
*
*
* To check the state of a delivery stream, use DescribeDeliveryStream.
*
*
* While the delivery stream is DELETING
state, the service may continue to accept the records, but the
* service doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you
* should first stop any applications that are sending records before deleting a delivery stream.
*
*
* @param deleteDeliveryStreamRequest
* @return Result of the DeleteDeliveryStream operation returned by the service.
* @throws ResourceInUseException
* The resource is already in use and not available for this operation.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.DeleteDeliveryStream
* @see AWS
* API Documentation
*/
default DeleteDeliveryStreamResponse deleteDeliveryStream(
Consumer deleteDeliveryStreamRequest) throws ResourceInUseException,
ResourceNotFoundException, SdkServiceException, SdkClientException, FirehoseException {
return deleteDeliveryStream(DeleteDeliveryStreamRequest.builder().apply(deleteDeliveryStreamRequest).build());
}
/**
*
* Describes the specified delivery stream and gets the status. For example, after your delivery stream is created,
* call DescribeDeliveryStream to see if the delivery stream is ACTIVE
and therefore ready for
* data to be sent to it.
*
*
* @param describeDeliveryStreamRequest
* @return Result of the DescribeDeliveryStream operation returned by the service.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.DescribeDeliveryStream
* @see AWS API Documentation
*/
default DescribeDeliveryStreamResponse describeDeliveryStream(DescribeDeliveryStreamRequest describeDeliveryStreamRequest)
throws ResourceNotFoundException, SdkServiceException, SdkClientException, FirehoseException {
throw new UnsupportedOperationException();
}
/**
*
* Describes the specified delivery stream and gets the status. For example, after your delivery stream is created,
* call DescribeDeliveryStream to see if the delivery stream is ACTIVE
and therefore ready for
* data to be sent to it.
*
*
* @param describeDeliveryStreamRequest
* @return Result of the DescribeDeliveryStream operation returned by the service.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.DescribeDeliveryStream
* @see AWS API Documentation
*/
default DescribeDeliveryStreamResponse describeDeliveryStream(
Consumer describeDeliveryStreamRequest) throws ResourceNotFoundException,
SdkServiceException, SdkClientException, FirehoseException {
return describeDeliveryStream(DescribeDeliveryStreamRequest.builder().apply(describeDeliveryStreamRequest).build());
}
/**
*
* Lists your delivery streams.
*
*
* The number of delivery streams might be too large to return using a single call to ListDeliveryStreams.
* You can limit the number of delivery streams returned, using the Limit parameter. To determine whether
* there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there
* are more delivery streams to list, you can request them by specifying the name of the last delivery stream
* returned in the call in the ExclusiveStartDeliveryStreamName parameter of a subsequent call.
*
*
* @return Result of the ListDeliveryStreams operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.ListDeliveryStreams
* @see #listDeliveryStreams(ListDeliveryStreamsRequest)
* @see AWS
* API Documentation
*/
default ListDeliveryStreamsResponse listDeliveryStreams() throws SdkServiceException, SdkClientException, FirehoseException {
return listDeliveryStreams(ListDeliveryStreamsRequest.builder().build());
}
/**
*
* Lists your delivery streams.
*
*
* The number of delivery streams might be too large to return using a single call to ListDeliveryStreams.
* You can limit the number of delivery streams returned, using the Limit parameter. To determine whether
* there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there
* are more delivery streams to list, you can request them by specifying the name of the last delivery stream
* returned in the call in the ExclusiveStartDeliveryStreamName parameter of a subsequent call.
*
*
* @param listDeliveryStreamsRequest
* @return Result of the ListDeliveryStreams operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.ListDeliveryStreams
* @see AWS
* API Documentation
*/
default ListDeliveryStreamsResponse listDeliveryStreams(ListDeliveryStreamsRequest listDeliveryStreamsRequest)
throws SdkServiceException, SdkClientException, FirehoseException {
throw new UnsupportedOperationException();
}
/**
*
* Lists your delivery streams.
*
*
* The number of delivery streams might be too large to return using a single call to ListDeliveryStreams.
* You can limit the number of delivery streams returned, using the Limit parameter. To determine whether
* there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there
* are more delivery streams to list, you can request them by specifying the name of the last delivery stream
* returned in the call in the ExclusiveStartDeliveryStreamName parameter of a subsequent call.
*
*
* @param listDeliveryStreamsRequest
* @return Result of the ListDeliveryStreams operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.ListDeliveryStreams
* @see AWS
* API Documentation
*/
default ListDeliveryStreamsResponse listDeliveryStreams(
Consumer listDeliveryStreamsRequest) throws SdkServiceException,
SdkClientException, FirehoseException {
return listDeliveryStreams(ListDeliveryStreamsRequest.builder().apply(listDeliveryStreamsRequest).build());
}
/**
*
* Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into
* a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
*
*
* By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5
* MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate
* across these two operations for each delivery stream. For more information about limits and how to request an
* increase, see Amazon Kinesis Firehose
* Limits.
*
*
* You must specify the name of the delivery stream and the data record when using PutRecord. The data record
* consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a
* log file, geographic location data, website clickstream data, and so on.
*
*
* Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the
* destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some
* other character unique within the data. This allows the consumer application to parse individual data items when
* reading the data from the destination.
*
*
* The PutRecord operation returns a RecordId, which is a unique string assigned to each record.
* Producer applications can use this ID for purposes such as auditability and investigation.
*
*
* If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the
* exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
*
*
* Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream
* as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours,
* the data is no longer available.
*
*
* @param putRecordRequest
* @return Result of the PutRecord operation returned by the service.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws InvalidArgumentException
* The specified input parameter has a value that is not valid.
* @throws ServiceUnavailableException
* The service is unavailable, back off and retry the operation. If you continue to see the exception,
* throughput limits for the delivery stream may have been exceeded. For more information about limits and
* how to request an increase, see Amazon Kinesis Firehose Limits.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.PutRecord
* @see AWS API
* Documentation
*/
default PutRecordResponse putRecord(PutRecordRequest putRecordRequest) throws ResourceNotFoundException,
InvalidArgumentException, ServiceUnavailableException, SdkServiceException, SdkClientException, FirehoseException {
throw new UnsupportedOperationException();
}
/**
*
* Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into
* a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
*
*
* By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5
* MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate
* across these two operations for each delivery stream. For more information about limits and how to request an
* increase, see Amazon Kinesis Firehose
* Limits.
*
*
* You must specify the name of the delivery stream and the data record when using PutRecord. The data record
* consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a
* log file, geographic location data, website clickstream data, and so on.
*
*
* Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the
* destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some
* other character unique within the data. This allows the consumer application to parse individual data items when
* reading the data from the destination.
*
*
* The PutRecord operation returns a RecordId, which is a unique string assigned to each record.
* Producer applications can use this ID for purposes such as auditability and investigation.
*
*
* If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the
* exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
*
*
* Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream
* as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours,
* the data is no longer available.
*
*
* @param putRecordRequest
* @return Result of the PutRecord operation returned by the service.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws InvalidArgumentException
* The specified input parameter has a value that is not valid.
* @throws ServiceUnavailableException
* The service is unavailable, back off and retry the operation. If you continue to see the exception,
* throughput limits for the delivery stream may have been exceeded. For more information about limits and
* how to request an increase, see Amazon Kinesis Firehose Limits.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.PutRecord
* @see AWS API
* Documentation
*/
default PutRecordResponse putRecord(Consumer putRecordRequest) throws ResourceNotFoundException,
InvalidArgumentException, ServiceUnavailableException, SdkServiceException, SdkClientException, FirehoseException {
return putRecord(PutRecordRequest.builder().apply(putRecordRequest).build());
}
/**
*
* Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per
* producer than when writing single records. To write single data records into a delivery stream, use
* PutRecord. Applications using these operations are referred to as producers.
*
*
* By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5
* MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these
* two operations for each delivery stream. For more information about limits, see Amazon Kinesis Firehose Limits.
*
*
* Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as
* 1,000 KB (before 64-bit encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
*
*
* You must specify the name of the delivery stream and the data record when using PutRecord. The data record
* consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a
* segment from a log file, geographic location data, web site clickstream data, and so on.
*
*
* Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the
* destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some
* other character unique within the data. This allows the consumer application to parse individual data items when
* reading the data from the destination.
*
*
* The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of
* responses, RequestResponses. Each entry in the RequestResponses array provides additional
* information about the processed record. It directly correlates with a record in the request array using the same
* ordering, from the top to the bottom. The response array always includes the same number of records as the
* request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis
* Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not
* stop the processing of subsequent records.
*
*
* A successfully processed record includes a RecordId value, which is unique for the record. An
* unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode
* reflects the type of error, and is one of the following values: ServiceUnavailable
or
* InternalFailure
. ErrorMessage provides more detailed information about the error.
*
*
* If there is an internal server error or a timeout, the write might have completed or it might have failed. If
* FailedPutCount is greater than 0, retry the request, resending only those records that might have failed
* processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and
* corresponding charges). We recommend that you handle any duplicates at the destination.
*
*
* If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception
* persists, it is possible that the throughput limits have been exceeded for the delivery stream.
*
*
* Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream
* as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours,
* the data is no longer available.
*
*
* @param putRecordBatchRequest
* @return Result of the PutRecordBatch operation returned by the service.
* @throws ResourceNotFoundException
* The specified resource could not be found.
* @throws InvalidArgumentException
* The specified input parameter has a value that is not valid.
* @throws ServiceUnavailableException
* The service is unavailable, back off and retry the operation. If you continue to see the exception,
* throughput limits for the delivery stream may have been exceeded. For more information about limits and
* how to request an increase, see Amazon Kinesis Firehose Limits.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws FirehoseException
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample FirehoseClient.PutRecordBatch
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecordBatch" target="_top">AWS API
 *      Documentation</a>
*/
default PutRecordBatchResponse putRecordBatch(PutRecordBatchRequest putRecordBatchRequest) throws ResourceNotFoundException,
        InvalidArgumentException, ServiceUnavailableException, SdkServiceException, SdkClientException, FirehoseException {
    // Default stub on the interface: invoking it directly always fails.
    // NOTE(review): presumably overridden by the SDK-generated concrete client (e.g. DefaultFirehoseClient) — confirm.
    throw new UnsupportedOperationException();
}
/**
 * <p>
 * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per
 * producer than when writing single records. To write single data records into a delivery stream, use
 * PutRecord. Applications using these operations are referred to as producers.
 * </p>
 * <p>
 * By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5
 * MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these
 * two operations for each delivery stream. For more information about limits, see Amazon Kinesis Firehose Limits.
 * </p>
 * <p>
 * Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as
 * 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
 * </p>
 * <p>
 * You must specify the name of the delivery stream and the data record when using PutRecord. The data record
 * consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a
 * segment from a log file, geographic location data, web site clickstream data, and so on.
 * </p>
 * <p>
 * Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the
 * destination, a common solution is to use delimiters in the data, such as a newline (\n) or some
 * other character unique within the data. This allows the consumer application to parse individual data items when
 * reading the data from the destination.
 * </p>
 * <p>
 * The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of
 * responses, RequestResponses. Each entry in the RequestResponses array provides additional
 * information about the processed record. It directly correlates with a record in the request array using the same
 * ordering, from the top to the bottom. The response array always includes the same number of records as the
 * request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis
 * Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not
 * stop the processing of subsequent records.
 * </p>
 * <p>
 * A successfully processed record includes a RecordId value, which is unique for the record. An
 * unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode
 * reflects the type of error, and is one of the following values: ServiceUnavailable or
 * InternalFailure. ErrorMessage provides more detailed information about the error.
 * </p>
 * <p>
 * If there is an internal server error or a timeout, the write might have completed or it might have failed. If
 * FailedPutCount is greater than 0, retry the request, resending only those records that might have failed
 * processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and
 * corresponding charges). We recommend that you handle any duplicates at the destination.
 * </p>
 * <p>
 * If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception
 * persists, it is possible that the throughput limits have been exceeded for the delivery stream.
 * </p>
 * <p>
 * Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream
 * as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours,
 * the data is no longer available.
 * </p>
 *
 * @param putRecordBatchRequest
 *        A {@link Consumer} that will call methods on a {@link PutRecordBatchRequest.Builder} to configure the
 *        request. The built request is forwarded to {@link #putRecordBatch(PutRecordBatchRequest)}.
 * @return Result of the PutRecordBatch operation returned by the service.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws ServiceUnavailableException
 *         The service is unavailable, back off and retry the operation. If you continue to see the exception,
 *         throughput limits for the delivery stream may have been exceeded. For more information about limits and
 *         how to request an increase, see Amazon Kinesis Firehose Limits.
 * @throws SdkException
 *         Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
 *         catch all scenarios.
 * @throws SdkClientException
 *         If any client side error occurs such as an IO related failure, failure to get credentials, etc.
 * @throws FirehoseException
 *         Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
 * @sample FirehoseClient.PutRecordBatch
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecordBatch" target="_top">AWS API
 *      Documentation</a>
 */
default PutRecordBatchResponse putRecordBatch(Consumer<PutRecordBatchRequest.Builder> putRecordBatchRequest)
        throws ResourceNotFoundException, InvalidArgumentException, ServiceUnavailableException, SdkServiceException,
        SdkClientException, FirehoseException {
    // Apply the caller-supplied mutator to a fresh builder, then delegate to the request-object overload.
    return putRecordBatch(PutRecordBatchRequest.builder().apply(putRecordBatchRequest).build());
}
/**
 * <p>
 * Updates the specified destination of the specified delivery stream.
 * </p>
 * <p>
 * You can use this operation to change the destination type (for example, to replace the Amazon S3 destination with
 * Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name
 * of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active
 * while the configurations are updated, so data writes to the delivery stream can continue during this process. The
 * updated configurations are usually effective within a few minutes.
 * </p>
 * <p>
 * Note that switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can
 * only update to another Amazon ES destination.
 * </p>
 * <p>
 * If the destination type is the same, Kinesis Firehose merges the configuration parameters specified with the
 * destination configuration that already exists on the delivery stream. If any of the parameters are not specified
 * in the call, the existing values are retained. For example, in the Amazon S3 destination, if
 * EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained
 * on the destination.
 * </p>
 * <p>
 * If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift,
 * Kinesis Firehose does not merge any parameters. In this case, all parameters must be specified.
 * </p>
 * <p>
 * Kinesis Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This
 * is a required field, and the service updates the configuration only if the existing configuration has a version
 * ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using
 * DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next
 * call.
 * </p>
 *
 * @param updateDestinationRequest
 *        The request object for the UpdateDestination operation.
 * @return Result of the UpdateDestination operation returned by the service.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws ResourceInUseException
 *         The resource is already in use and not available for this operation.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @throws ConcurrentModificationException
 *         Another modification has already happened. Fetch VersionId again and use it to update the
 *         destination.
 * @throws SdkException
 *         Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
 *         catch all scenarios.
 * @throws SdkClientException
 *         If any client side error occurs such as an IO related failure, failure to get credentials, etc.
 * @throws FirehoseException
 *         Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
 * @sample FirehoseClient.UpdateDestination
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/UpdateDestination" target="_top">AWS API
 *      Documentation</a>
 */
default UpdateDestinationResponse updateDestination(UpdateDestinationRequest updateDestinationRequest)
        throws InvalidArgumentException, ResourceInUseException, ResourceNotFoundException, ConcurrentModificationException,
        SdkServiceException, SdkClientException, FirehoseException {
    // Default stub on the interface: invoking it directly always fails.
    // NOTE(review): presumably overridden by the SDK-generated concrete client — confirm.
    throw new UnsupportedOperationException();
}
/**
 * <p>
 * Updates the specified destination of the specified delivery stream.
 * </p>
 * <p>
 * You can use this operation to change the destination type (for example, to replace the Amazon S3 destination with
 * Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name
 * of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active
 * while the configurations are updated, so data writes to the delivery stream can continue during this process. The
 * updated configurations are usually effective within a few minutes.
 * </p>
 * <p>
 * Note that switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can
 * only update to another Amazon ES destination.
 * </p>
 * <p>
 * If the destination type is the same, Kinesis Firehose merges the configuration parameters specified with the
 * destination configuration that already exists on the delivery stream. If any of the parameters are not specified
 * in the call, the existing values are retained. For example, in the Amazon S3 destination, if
 * EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained
 * on the destination.
 * </p>
 * <p>
 * If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift,
 * Kinesis Firehose does not merge any parameters. In this case, all parameters must be specified.
 * </p>
 * <p>
 * Kinesis Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This
 * is a required field, and the service updates the configuration only if the existing configuration has a version
 * ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using
 * DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next
 * call.
 * </p>
 *
 * @param updateDestinationRequest
 *        A {@link Consumer} that will call methods on an {@link UpdateDestinationRequest.Builder} to configure the
 *        request. The built request is forwarded to {@link #updateDestination(UpdateDestinationRequest)}.
 * @return Result of the UpdateDestination operation returned by the service.
 * @throws InvalidArgumentException
 *         The specified input parameter has a value that is not valid.
 * @throws ResourceInUseException
 *         The resource is already in use and not available for this operation.
 * @throws ResourceNotFoundException
 *         The specified resource could not be found.
 * @throws ConcurrentModificationException
 *         Another modification has already happened. Fetch VersionId again and use it to update the
 *         destination.
 * @throws SdkException
 *         Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
 *         catch all scenarios.
 * @throws SdkClientException
 *         If any client side error occurs such as an IO related failure, failure to get credentials, etc.
 * @throws FirehoseException
 *         Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
 * @sample FirehoseClient.UpdateDestination
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/UpdateDestination" target="_top">AWS API
 *      Documentation</a>
 */
default UpdateDestinationResponse updateDestination(Consumer<UpdateDestinationRequest.Builder> updateDestinationRequest)
        throws InvalidArgumentException, ResourceInUseException, ResourceNotFoundException, ConcurrentModificationException,
        SdkServiceException, SdkClientException, FirehoseException {
    // Apply the caller-supplied mutator to a fresh builder, then delegate to the request-object overload.
    return updateDestination(UpdateDestinationRequest.builder().apply(updateDestinationRequest).build());
}
/**
 * Returns the {@link ServiceMetadata} registered under the service identifier {@code "firehose"}.
 *
 * @return the {@code ServiceMetadata} instance for this service.
 */
static ServiceMetadata serviceMetadata() {
    return ServiceMetadata.of("firehose");
}
}