/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.kinesis;
import java.net.*;
import java.util.*;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.regions.*;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.metrics.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.util.json.*;
import com.amazonaws.services.kinesis.model.*;
import com.amazonaws.services.kinesis.model.transform.*;
/**
* Client for accessing AmazonKinesis. All service calls made
* using this client are blocking, and will not return until the service call
* completes.
*
* Amazon Kinesis Service API Reference
* Amazon Kinesis is a managed service that scales elastically for real
* time processing of streaming big data.
*
*/
public class AmazonKinesisClient extends AmazonWebServiceClient implements AmazonKinesis {
/** Provider for AWS credentials. */
private AWSCredentialsProvider awsCredentialsProvider;
private static final Log log = LogFactory.getLog(AmazonKinesis.class);
/**
* List of exception unmarshallers for all AmazonKinesis exceptions.
*/
protected List<JsonErrorUnmarshaller> jsonErrorUnmarshallers;
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis. A credentials provider chain will be used
* that searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @see DefaultAWSCredentialsProviderChain
*/
public AmazonKinesisClient() {
this(new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
}
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis. A credentials provider chain will be used
* that searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonKinesis
* (ex: proxy settings, retry counts, etc.).
*
* @see DefaultAWSCredentialsProviderChain
*/
public AmazonKinesisClient(ClientConfiguration clientConfiguration) {
this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis using the specified AWS account credentials.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentials The AWS credentials (access key ID and secret key) to use
* when authenticating with AWS services.
*/
public AmazonKinesisClient(AWSCredentials awsCredentials) {
this(awsCredentials, new ClientConfiguration());
}
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis using the specified AWS account credentials
* and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentials The AWS credentials (access key ID and secret key) to use
* when authenticating with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonKinesis
* (ex: proxy settings, retry counts, etc.).
*/
public AmazonKinesisClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
super(adjustClientConfiguration(clientConfiguration));
this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
init();
}
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis using the specified AWS account credentials provider.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
*/
public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider) {
this(awsCredentialsProvider, new ClientConfiguration());
}
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis using the specified AWS account credentials
* provider and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonKinesis
* (ex: proxy settings, retry counts, etc.).
*/
public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
this(awsCredentialsProvider, clientConfiguration, null);
}
/**
* Constructs a new client to invoke service methods on
* AmazonKinesis using the specified AWS account credentials
* provider, client configuration options and request metric collector.
*
*
* All service calls made using this new client object are blocking, and will not
* return until the service call completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration The client configuration options controlling how this
* client connects to AmazonKinesis
* (ex: proxy settings, retry counts, etc.).
* @param requestMetricCollector optional request metric collector
*/
public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider,
ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
super(adjustClientConfiguration(clientConfiguration), requestMetricCollector);
this.awsCredentialsProvider = awsCredentialsProvider;
init();
}
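/*
 * Illustrative usage sketch (not part of the generated client): constructing a
 * client with the default credentials provider chain and pointing it at a
 * region. The region Regions.US_WEST_2 is a placeholder choice.
 *
 *   AmazonKinesisClient kinesis = new AmazonKinesisClient(
 *           new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
 *   kinesis.setRegion(Region.getRegion(Regions.US_WEST_2));
 */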
private void init() {
jsonErrorUnmarshallers = new ArrayList<JsonErrorUnmarshaller>();
jsonErrorUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
jsonErrorUnmarshallers.add(new ResourceInUseExceptionUnmarshaller());
jsonErrorUnmarshallers.add(new InvalidArgumentExceptionUnmarshaller());
jsonErrorUnmarshallers.add(new ExpiredIteratorExceptionUnmarshaller());
jsonErrorUnmarshallers.add(new ProvisionedThroughputExceededExceptionUnmarshaller());
jsonErrorUnmarshallers.add(new ResourceNotFoundExceptionUnmarshaller());
jsonErrorUnmarshallers.add(new JsonErrorUnmarshaller());
// calling this.setEndPoint(...) will also modify the signer accordingly
this.setEndpoint("kinesis.us-east-1.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain(
"/com/amazonaws/services/kinesis/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain(
"/com/amazonaws/services/kinesis/request.handler2s"));
}
private static ClientConfiguration adjustClientConfiguration(ClientConfiguration orig) {
ClientConfiguration config = orig;
return config;
}
/**
*
* This operation returns the following information about the stream: the
* current status of the stream, the stream Amazon Resource Name (ARN),
* and an array of shard objects that comprise the stream. For each shard
* object there is information about the hash key and sequence number
* ranges that the shard spans, and the IDs of any earlier shards that
* played a role in a MergeShards or SplitShard operation that created
* the shard. A sequence number is the identifier associated with every
* record ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You can limit the number of returned shards using the Limit parameter.
* The number of shards in a stream may be too large to return from a
* single call to DescribeStream. You can detect this by using the
* HasMoreShards flag in the returned output. HasMoreShards is set to
* true when there is more data available.
*
*
* If there are more shards available, you can request more shards by
* using the shard ID of the last shard returned by the DescribeStream
* request, in the ExclusiveStartShardId parameter in a subsequent
* request to DescribeStream.
*
* DescribeStream is a paginated operation.
*
*
* DescribeStream has a limit of 10 transactions per second per account.
*
*
* @param describeStreamRequest Container for the necessary parameters to
* execute the DescribeStream service method on AmazonKinesis.
*
* @return The response from the DescribeStream service method, as
* returned by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeStreamResult describeStream(DescribeStreamRequest describeStreamRequest) {
ExecutionContext executionContext = createExecutionContext(describeStreamRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeStreamRequest> request = null;
Response<DescribeStreamResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeStreamRequestMarshaller().marshall(describeStreamRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
Unmarshaller<DescribeStreamResult, JsonUnmarshallerContext> unmarshaller = new DescribeStreamResultJsonUnmarshaller();
JsonResponseHandler<DescribeStreamResult> responseHandler = new JsonResponseHandler<DescribeStreamResult>(unmarshaller);
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response, LOGGING_AWS_REQUEST_METRIC);
}
}
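/*
 * Illustrative usage sketch (not part of the generated client): paginating
 * through all shards of a stream with DescribeStream. The stream name
 * "my-stream" is a placeholder.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   List<Shard> shards = new ArrayList<Shard>();
 *   String exclusiveStartShardId = null;
 *   DescribeStreamResult result;
 *   do {
 *       result = kinesis.describeStream(new DescribeStreamRequest()
 *               .withStreamName("my-stream")
 *               .withExclusiveStartShardId(exclusiveStartShardId));
 *       shards.addAll(result.getStreamDescription().getShards());
 *       if (!shards.isEmpty()) {
 *           exclusiveStartShardId = shards.get(shards.size() - 1).getShardId();
 *       }
 *   } while (Boolean.TRUE.equals(result.getStreamDescription().getHasMoreShards()));
 */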
/**
*
* This operation returns a shard iterator in ShardIterator. The shard
* iterator specifies the position in the shard from which you want to
* start reading data records sequentially. A shard iterator specifies
* this position using the sequence number of a data record in a shard.
* A sequence number is the identifier associated with every record
* ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You must specify the shard iterator type in the GetShardIterator
* request. For example, you can set the ShardIteratorType parameter to
* read exactly from the position denoted by a specific sequence number
* by using the AT_SEQUENCE_NUMBER shard iterator type, or right after
* the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator
* type, using sequence numbers returned by earlier PutRecord, GetRecords
* or DescribeStream requests. You can specify the shard iterator type
* TRIM_HORIZON in the request to cause ShardIterator to point to the
* last untrimmed record in the shard in the system, which is the oldest
* data record in the shard. Or you can point to just after the most
* recent record in the shard, by using the shard iterator type LATEST,
* so that you always read the most recent data in the shard.
*
*
* Note: Each shard iterator expires five minutes after it is
* returned to the requester.
*
*
* When you repeatedly read from an Amazon Kinesis stream, use a
* GetShardIterator request to get the first shard iterator to use in
* your first GetRecords request and then use the shard iterator returned
* by the GetRecords request in NextShardIterator for subsequent reads.
* A new shard iterator is returned by every GetRecords request in
* NextShardIterator, which you use in the ShardIterator parameter of
* the next GetRecords request.
*
*
* If a GetShardIterator request is made too often, you will receive a
* ProvisionedThroughputExceededException. For more information about
* throughput limits, see the Amazon Kinesis Developer Guide.
*
*
* GetShardIterator can return null for its ShardIterator to indicate
* that the shard has been closed and that the requested iterator will
* return no more data. A shard can be closed by a SplitShard or
* MergeShards operation.
*
*
* GetShardIterator has a limit of 5 transactions per second per account
* per open shard.
*
*
* @param getShardIteratorRequest Container for the necessary parameters
* to execute the GetShardIterator service method on AmazonKinesis.
*
* @return The response from the GetShardIterator service method, as
* returned by AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public GetShardIteratorResult getShardIterator(GetShardIteratorRequest getShardIteratorRequest) {
ExecutionContext executionContext = createExecutionContext(getShardIteratorRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<GetShardIteratorRequest> request = null;
Response<GetShardIteratorResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new GetShardIteratorRequestMarshaller().marshall(getShardIteratorRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
Unmarshaller<GetShardIteratorResult, JsonUnmarshallerContext> unmarshaller = new GetShardIteratorResultJsonUnmarshaller();
JsonResponseHandler<GetShardIteratorResult> responseHandler = new JsonResponseHandler<GetShardIteratorResult>(unmarshaller);
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response, LOGGING_AWS_REQUEST_METRIC);
}
}
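/*
 * Illustrative usage sketch (not part of the generated client): obtaining an
 * iterator that starts at the oldest untrimmed record of a shard. The stream
 * name "my-stream" and shard ID "shardId-000000000000" are placeholders.
 *
 *   GetShardIteratorResult iteratorResult = kinesis.getShardIterator(
 *           new GetShardIteratorRequest()
 *                   .withStreamName("my-stream")
 *                   .withShardId("shardId-000000000000")
 *                   .withShardIteratorType("TRIM_HORIZON"));
 *   String shardIterator = iteratorResult.getShardIterator();
 *   // Pass shardIterator to getRecords; use AT_SEQUENCE_NUMBER or
 *   // AFTER_SEQUENCE_NUMBER together with withStartingSequenceNumber(...) to
 *   // resume from a known position, or LATEST to read only new data.
 */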
/**
*
* This operation puts a data record into an Amazon Kinesis stream from a
* producer. This operation must be called to send data from the producer
* into the Amazon Kinesis stream for real-time ingestion and subsequent
* processing. The PutRecord operation requires the name of the stream
* that captures, stores, and transports the data; a partition key; and
* the data blob itself. The data blob could be a segment from a log
* file, geographic/location data, website clickstream data, or any
* other data type.
*
*
* The partition key is used to distribute data across shards. Amazon
* Kinesis segregates the data records that belong to a data stream into
* multiple shards, using the partition key associated with each data
* record to determine which shard a given data record belongs to.
*
*
* Partition keys are Unicode strings, with a maximum length limit of 256
* bytes. An MD5 hash function is used to map partition keys to 128-bit
* integer values and to map associated data records to shards using the
* hash key ranges of the shards. You can override hashing the partition
* key to determine the shard by explicitly specifying a hash value using
* the ExplicitHashKey parameter. For more information, see the
* Amazon Kinesis Developer Guide.
*
*
* PutRecord returns the shard ID of where the data record was placed
* and the sequence number that was assigned to the data record.
*
*
* Sequence numbers generally increase over time. To guarantee strictly
* increasing ordering, use the SequenceNumberForOrdering parameter. For
* more information, see the Amazon Kinesis Developer Guide.
*
*
* If a PutRecord request cannot be processed because of insufficient
* provisioned throughput on the shard involved in the request,
* PutRecord throws ProvisionedThroughputExceededException.
*
*
* Data records are accessible for only 24 hours from the time that they
* are added to an Amazon Kinesis stream.
*
*
* @param putRecordRequest Container for the necessary parameters to
* execute the PutRecord service method on AmazonKinesis.
*
* @return The response from the PutRecord service method, as returned by
* AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public PutRecordResult putRecord(PutRecordRequest putRecordRequest) {
ExecutionContext executionContext = createExecutionContext(putRecordRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<PutRecordRequest> request = null;
Response<PutRecordResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutRecordRequestMarshaller().marshall(putRecordRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
Unmarshaller<PutRecordResult, JsonUnmarshallerContext> unmarshaller = new PutRecordResultJsonUnmarshaller();
JsonResponseHandler<PutRecordResult> responseHandler = new JsonResponseHandler<PutRecordResult>(unmarshaller);
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response, LOGGING_AWS_REQUEST_METRIC);
}
}
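/*
 * Illustrative usage sketch (not part of the generated client): writing a
 * single record. The stream name "my-stream", partition key "device-42", and
 * payload are placeholders; the snippet assumes java.nio.ByteBuffer and
 * java.nio.charset.Charset are imported.
 *
 *   byte[] payload = "hello kinesis".getBytes(Charset.forName("UTF-8"));
 *   PutRecordResult putResult = kinesis.putRecord(new PutRecordRequest()
 *           .withStreamName("my-stream")
 *           .withPartitionKey("device-42")
 *           .withData(ByteBuffer.wrap(payload)));
 *   // putResult.getShardId() and putResult.getSequenceNumber() identify where
 *   // the record was stored.
 */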
/**
*
* This operation returns one or more data records from a shard. A
* GetRecords operation request can retrieve up to 10 MB of data.
*
*
* You specify a shard iterator for the shard that you want to read data
* from in the ShardIterator parameter. The shard iterator specifies the
* position in the shard from which you want to start reading data
* records sequentially. A shard iterator specifies this position using
* the sequence number of a data record in the shard. For more
* information about the shard iterator, see GetShardIterator.
*
*
* GetRecords may return a partial result if the response size limit is
* exceeded. You will get an error, but not a partial result, if the
* shard's provisioned throughput is exceeded, the shard iterator has
* expired, or an internal processing failure has occurred. Clients can
* request a smaller amount of data by specifying a maximum number of
* returned records using the Limit parameter. The Limit parameter can
* be set to an integer value of up to 10,000. If you set the value to
* an integer greater than 10,000, you will receive
* InvalidArgumentException.
*
*
* A new shard iterator is returned by every GetRecords request in
* NextShardIterator, which you use in the ShardIterator parameter of
* the next GetRecords request. When you repeatedly read from an Amazon
* Kinesis stream, use a GetShardIterator request to get the first shard
* iterator to use in your first GetRecords request and then use the
* shard iterator returned in NextShardIterator for subsequent reads.
*
*
* GetRecords can return null for the NextShardIterator to reflect that
* the shard has been closed and that the requested shard iterator would
* never have returned more data.
*
*
* If no items can be processed because of insufficient provisioned
* throughput on the shard involved in the request, GetRecords throws
* ProvisionedThroughputExceededException.
*
*
* @param getRecordsRequest Container for the necessary parameters to
* execute the GetRecords service method on AmazonKinesis.
*
* @return The response from the GetRecords service method, as returned
* by AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws ExpiredIteratorException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public GetRecordsResult getRecords(GetRecordsRequest getRecordsRequest) {
ExecutionContext executionContext = createExecutionContext(getRecordsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<GetRecordsRequest> request = null;
Response<GetRecordsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new GetRecordsRequestMarshaller().marshall(getRecordsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
Unmarshaller<GetRecordsResult, JsonUnmarshallerContext> unmarshaller = new GetRecordsResultJsonUnmarshaller();
JsonResponseHandler<GetRecordsResult> responseHandler = new JsonResponseHandler<GetRecordsResult>(unmarshaller);
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response, LOGGING_AWS_REQUEST_METRIC);
}
}
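/*
 * Illustrative usage sketch (not part of the generated client): a basic read
 * loop that follows NextShardIterator until the shard is closed. A real
 * consumer would pause between calls to stay within the per-shard read limits
 * described above; the stream name and shard ID are placeholders.
 *
 *   String shardIterator = kinesis.getShardIterator(
 *           "my-stream", "shardId-000000000000", "TRIM_HORIZON").getShardIterator();
 *   while (shardIterator != null) {
 *       GetRecordsResult recordsResult = kinesis.getRecords(new GetRecordsRequest()
 *               .withShardIterator(shardIterator)
 *               .withLimit(1000));
 *       for (Record record : recordsResult.getRecords()) {
 *           // record.getData() is the ByteBuffer payload written by PutRecord.
 *       }
 *       shardIterator = recordsResult.getNextShardIterator();
 *   }
 */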
/**
*
* This operation splits a shard into two new shards in the stream, to
* increase the stream's capacity to ingest and transport data.
* SplitShard is called when there is a need to increase the overall
* capacity of a stream because of an expected increase in the volume of
* data records being ingested.
*
*
* SplitShard can also be used when a given shard appears to be
* approaching its maximum utilization, for example, when the set of
* producers sending data into the specific shard are suddenly sending
* more than previously anticipated. You can also call the SplitShard
* operation to increase stream capacity, so that more Amazon Kinesis
* applications can simultaneously read data from the stream for
* real-time processing.
*
*
* The SplitShard operation requires that you specify the shard to be
* split and the new hash key, which is the position in the shard where
* the shard gets split in two. In many cases, the new hash key might
* simply be the average of the beginning and ending hash key, but it
* can be any hash key value in the range being mapped into the shard.
* For more information about splitting shards, see the Amazon Kinesis
* Developer Guide.
*
*
* You can use the DescribeStream operation to determine the shard ID
* and hash key values for the ShardToSplit and NewStartingHashKey
* parameters that are specified in the SplitShard request.
*
*
* SplitShard is an asynchronous operation. Upon receiving a SplitShard
* request, Amazon Kinesis immediately returns a response and sets the
* stream status to UPDATING. After the operation is completed, Amazon
* Kinesis sets the stream status to ACTIVE. Read and write operations
* continue to work while the stream is in the UPDATING state.
*
*
* You can use DescribeStream to check the status of the stream, which
* is returned in StreamStatus. If the stream is in the ACTIVE state,
* you can call SplitShard. If a stream is in the CREATING, UPDATING, or
* DELETING state, then Amazon Kinesis returns a ResourceInUseException.
*
*
* If the specified stream does not exist, Amazon Kinesis returns a
* ResourceNotFoundException. If you try to create more shards than are
* authorized for your account, you receive a LimitExceededException.
*
*
* Note: The default limit for an AWS account is 10 shards per
* stream. If you need to create a stream with more than 10 shards,
* contact AWS Support to increase the limit on your account.
*
*
* If you try to operate on too many streams in parallel using
* CreateStream, DeleteStream, MergeShards or SplitShard, you will
* receive a LimitExceededException.
*
*
* SplitShard has a limit of 5 transactions per second per account.
*
*
* @param splitShardRequest Container for the necessary parameters to
* execute the SplitShard service method on AmazonKinesis.
*
*
* @throws LimitExceededException
* @throws ResourceInUseException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void splitShard(SplitShardRequest splitShardRequest) {
ExecutionContext executionContext = createExecutionContext(splitShardRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<SplitShardRequest> request;
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new SplitShardRequestMarshaller().marshall(splitShardRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
JsonResponseHandler<Void> responseHandler = new JsonResponseHandler<Void>(null);
invoke(request, responseHandler, executionContext);
}
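/*
 * Illustrative usage sketch (not part of the generated client): splitting a
 * shard at the midpoint of its hash key range. Here "shard" is assumed to be a
 * Shard obtained from a prior describeStream call and "my-stream" is a
 * placeholder name.
 *
 *   java.math.BigInteger start =
 *           new java.math.BigInteger(shard.getHashKeyRange().getStartingHashKey());
 *   java.math.BigInteger end =
 *           new java.math.BigInteger(shard.getHashKeyRange().getEndingHashKey());
 *   String newStartingHashKey =
 *           start.add(end).divide(java.math.BigInteger.valueOf(2)).toString();
 *   kinesis.splitShard(new SplitShardRequest()
 *           .withStreamName("my-stream")
 *           .withShardToSplit(shard.getShardId())
 *           .withNewStartingHashKey(newStartingHashKey));
 *   // The stream moves to UPDATING and back to ACTIVE when the split completes.
 */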
/**
*
* This operation adds a new Amazon Kinesis stream to your AWS account.
* A stream captures and transports data records that are continuously
* emitted from different data sources or producers. Scale-out within an
* Amazon Kinesis stream is explicitly supported by means of shards,
* which are uniquely identified groups of data records in an Amazon
* Kinesis stream.
*
*
* You specify and control the number of shards that a stream is composed
* of. Each open shard can support up to 5 read transactions per second,
* up to a maximum total of 2 MB of data read per second. Each shard can
* support up to 1000 write transactions per second, up to a maximum
* total of 1 MB data written per second. You can add shards to a stream
* if the amount of data input increases and you can remove shards if the
* amount of data input decreases.
*
*
* The stream name identifies the stream. The name is scoped to the AWS
* account used by the application. It is also scoped by region. That is,
* two streams in two different accounts can have the same name, and two
* streams in the same account, but in two different regions, can have
* the same name.
*
*
* CreateStream is an asynchronous operation. Upon receiving a
* CreateStream request, Amazon Kinesis immediately returns and sets the
* stream status to CREATING. After the stream is created, Amazon Kinesis
* sets the stream status to ACTIVE. You should perform read and write
* operations only on an ACTIVE stream.
*
*
* You receive a LimitExceededException when making a CreateStream
* request if you try to do one of the following:
*
*
* - Have more than five streams in the CREATING state at any point in
* time.
* - Create more shards than are authorized for your account.
*
*
* Note: The default limit for an AWS account is 10 shards per
* stream. If you need to create a stream with more than 10 shards,
* contact AWS Support to increase the limit on your account.
*
*
* You can use the DescribeStream operation to check the stream status,
* which is returned in StreamStatus.
*
*
* CreateStream has a limit of 5 transactions per second per account.
*
*
* @param createStreamRequest Container for the necessary parameters to
* execute the CreateStream service method on AmazonKinesis.
*
*
* @throws LimitExceededException
* @throws ResourceInUseException
* @throws InvalidArgumentException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void createStream(CreateStreamRequest createStreamRequest) {
ExecutionContext executionContext = createExecutionContext(createStreamRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<CreateStreamRequest> request;
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateStreamRequestMarshaller().marshall(createStreamRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
JsonResponseHandler<Void> responseHandler = new JsonResponseHandler<Void>(null);
invoke(request, responseHandler, executionContext);
}
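/*
 * Illustrative usage sketch (not part of the generated client): creating a
 * two-shard stream and polling until it becomes ACTIVE. The stream name is a
 * placeholder, and the loop assumes InterruptedException can be thrown or
 * handled by the caller.
 *
 *   kinesis.createStream(new CreateStreamRequest()
 *           .withStreamName("my-stream")
 *           .withShardCount(2));
 *   String status;
 *   do {
 *       Thread.sleep(1000);
 *       status = kinesis.describeStream("my-stream")
 *               .getStreamDescription().getStreamStatus();
 *   } while (!"ACTIVE".equals(status));
 */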
/**
*
* This operation deletes a stream and all of its shards and data. You
* must shut down any applications that are operating on the stream
* before you delete the stream. If an application attempts to operate
* on a deleted stream, it will receive the exception
* ResourceNotFoundException.
*
*
* If the stream is in the ACTIVE state, you can delete it. After a
* DeleteStream request, the specified stream is in the DELETING state
* until Amazon Kinesis completes the deletion.
*
*
* Note: Amazon Kinesis might continue to accept data read and
* write operations, such as PutRecord and GetRecords, on a stream in the
* DELETING state until the stream deletion is complete.
*
*
* When you delete a stream, any shards in that stream are also deleted.
*
*
* You can use the DescribeStream operation to check the state of the
* stream, which is returned in StreamStatus.
*
*
* DeleteStream has a limit of 5 transactions per second per account.
*
*
* @param deleteStreamRequest Container for the necessary parameters to
* execute the DeleteStream service method on AmazonKinesis.
*
*
* @throws LimitExceededException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteStream(DeleteStreamRequest deleteStreamRequest) {
ExecutionContext executionContext = createExecutionContext(deleteStreamRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<DeleteStreamRequest> request;
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteStreamRequestMarshaller().marshall(deleteStreamRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
JsonResponseHandler<Void> responseHandler = new JsonResponseHandler<Void>(null);
invoke(request, responseHandler, executionContext);
}
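/*
 * Illustrative usage sketch (not part of the generated client): deleting the
 * placeholder stream "my-stream" once its consumers have been shut down.
 *
 *   kinesis.deleteStream(new DeleteStreamRequest().withStreamName("my-stream"));
 *   // The stream remains in the DELETING state until Amazon Kinesis finishes
 *   // removing it; later operations on it raise ResourceNotFoundException.
 */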
/**
*
* This operation returns an array of the names of all the streams that
* are associated with the AWS account making the ListStreams request. A
* given AWS account can have many streams active at one time.
*
*
* The number of streams may be too large to return from a single call
* to ListStreams. You can limit the number of returned streams using
* the Limit parameter. If you do not specify a value for the Limit
* parameter, Amazon Kinesis uses the default limit, which is currently
* 10.
*
*
* You can detect if there are more streams available to list by using
* the HasMoreStreams flag from the returned output. If there are more
* streams available, you can request more streams by using the name of
* the last stream returned by the ListStreams request in the
* ExclusiveStartStreamName parameter in a subsequent request to
* ListStreams. The group of stream names returned by the subsequent
* request is then added to the list. You can continue this process
* until all the stream names have been collected in the list.
*
*
* ListStreams has a limit of 5 transactions per second per account.
*
*
* @param listStreamsRequest Container for the necessary parameters to
* execute the ListStreams service method on AmazonKinesis.
*
* @return The response from the ListStreams service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public ListStreamsResult listStreams(ListStreamsRequest listStreamsRequest) {
ExecutionContext executionContext = createExecutionContext(listStreamsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListStreamsRequest> request = null;
Response<ListStreamsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListStreamsRequestMarshaller().marshall(listStreamsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
Unmarshaller<ListStreamsResult, JsonUnmarshallerContext> unmarshaller = new ListStreamsResultJsonUnmarshaller();
JsonResponseHandler<ListStreamsResult> responseHandler = new JsonResponseHandler<ListStreamsResult>(unmarshaller);
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response, LOGGING_AWS_REQUEST_METRIC);
}
}
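/*
 * Illustrative usage sketch (not part of the generated client): collecting all
 * stream names for the account by following HasMoreStreams, requesting up to
 * 10 names per call.
 *
 *   List<String> streamNames = new ArrayList<String>();
 *   ListStreamsRequest listRequest = new ListStreamsRequest().withLimit(10);
 *   ListStreamsResult listResult;
 *   do {
 *       listResult = kinesis.listStreams(listRequest);
 *       streamNames.addAll(listResult.getStreamNames());
 *       if (!streamNames.isEmpty()) {
 *           listRequest.setExclusiveStartStreamName(
 *                   streamNames.get(streamNames.size() - 1));
 *       }
 *   } while (Boolean.TRUE.equals(listResult.getHasMoreStreams()));
 */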
/**
*
* This operation merges two adjacent shards in a stream and combines
* them into a single shard to reduce the stream's capacity to ingest and
* transport data. Two shards are considered adjacent if the union of the
* hash key ranges for the two shards form a contiguous set with no gaps.
* For example, if you have two shards, one with a hash key range of
* 276...381 and the other with a hash key range of 382...454, then you
* could merge these two shards into a single shard that would have a
* hash key range of 276...454. After the merge, the single child shard
* receives data for all hash key values covered by the two parent
* shards.
*
*
* MergeShards is called when there is a need to reduce the overall
* capacity of a stream because of excess capacity that is not being
* used. The operation requires that you specify the shard to be merged
* and the adjacent shard for a given stream. For more information about
* merging shards, see the Amazon Kinesis Developer Guide.
*
*
* If the stream is in the ACTIVE state, you can call MergeShards. If a
* stream is in the CREATING, UPDATING, or DELETING state, then Amazon
* Kinesis returns a ResourceInUseException. If the specified stream
* does not exist, Amazon Kinesis returns a ResourceNotFoundException.
*
*
* You can use the DescribeStream operation to check the state of the
* stream, which is returned in StreamStatus.
*
*
* MergeShards is an asynchronous operation. Upon receiving a
* MergeShards request, Amazon Kinesis immediately returns a response
* and sets the StreamStatus to UPDATING. After the operation is
* completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and
* write operations continue to work while the stream is in the UPDATING
* state.
*
*
* You use the DescribeStream operation to determine the shard IDs that
* are specified in the MergeShards request.
*
*
* If you try to operate on too many streams in parallel using
* CreateStream, DeleteStream, MergeShards or SplitShard, you will
* receive a LimitExceededException.
*
*
* MergeShards has a limit of 5 transactions per second per account.
*
*
* @param mergeShardsRequest Container for the necessary parameters to
* execute the MergeShards service method on AmazonKinesis.
*
*
* @throws LimitExceededException
* @throws ResourceInUseException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void mergeShards(MergeShardsRequest mergeShardsRequest) {
ExecutionContext executionContext = createExecutionContext(mergeShardsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
Request<MergeShardsRequest> request;
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new MergeShardsRequestMarshaller().marshall(mergeShardsRequest);
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
JsonResponseHandler<Void> responseHandler = new JsonResponseHandler<Void>(null);
invoke(request, responseHandler, executionContext);
}
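/*
 * Illustrative usage sketch (not part of the generated client): merging two
 * adjacent shards. The stream name and shard IDs are placeholders taken from a
 * prior describeStream call.
 *
 *   kinesis.mergeShards(new MergeShardsRequest()
 *           .withStreamName("my-stream")
 *           .withShardToMerge("shardId-000000000000")
 *           .withAdjacentShardToMerge("shardId-000000000001"));
 *   // As with SplitShard, the stream is UPDATING while the merge runs and
 *   // returns to ACTIVE when the merged child shard is ready.
 */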
/**
*
* This operation returns an array of the names of all the streams that
* are associated with the AWS account making the ListStreams request. A
* given AWS account can have many streams active at one time.
*
*
* The number of streams may be too large to return from a single call
* to ListStreams. You can limit the number of returned streams using
* the Limit parameter. If you do not specify a value for the Limit
* parameter, Amazon Kinesis uses the default limit, which is currently
* 10.
*
*
* You can detect if there are more streams available to list by using
* the HasMoreStreams flag from the returned output. If there are more
* streams available, you can request more streams by using the name of
* the last stream returned by the ListStreams request in the
* ExclusiveStartStreamName parameter in a subsequent request to
* ListStreams. The group of stream names returned by the subsequent
* request is then added to the list. You can continue this process
* until all the stream names have been collected in the list.
*
*
* ListStreams has a limit of 5 transactions per second per account.
*
*
* @return The response from the ListStreams service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public ListStreamsResult listStreams() throws AmazonServiceException, AmazonClientException {
return listStreams(new ListStreamsRequest());
}
/**
*
* This operation returns the following information about the stream: the
* current status of the stream, the stream Amazon Resource Name (ARN),
* and an array of shard objects that comprise the stream. For each shard
* object there is information about the hash key and sequence number
* ranges that the shard spans, and the IDs of any earlier shards that
* played a role in a MergeShards or SplitShard operation that created
* the shard. A sequence number is the identifier associated with every
* record ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You can limit the number of returned shards using the Limit parameter.
* The number of shards in a stream may be too large to return from a
* single call to DescribeStream. You can detect this by using the
* HasMoreShards flag in the returned output. HasMoreShards is set to
* true when there is more data available.
*
*
* If there are more shards available, you can request more shards by
* using the shard ID of the last shard returned by the DescribeStream
* request, in the ExclusiveStartShardId parameter in a subsequent
* request to DescribeStream.
*
* DescribeStream is a paginated operation.
*
*
* DescribeStream has a limit of 10 transactions per second per account.
*
*
* @param streamName The name of the stream to describe.
*
* @return The response from the DescribeStream service method, as
* returned by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeStreamResult describeStream(String streamName)
throws AmazonServiceException, AmazonClientException {
DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
describeStreamRequest.setStreamName(streamName);
return describeStream(describeStreamRequest);
}
/**
*
* This operation returns the following information about the stream: the
* current status of the stream, the stream Amazon Resource Name (ARN),
* and an array of shard objects that comprise the stream. For each shard
* object there is information about the hash key and sequence number
* ranges that the shard spans, and the IDs of any earlier shards that
* played a role in a MergeShards or SplitShard operation that created
* the shard. A sequence number is the identifier associated with every
* record ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You can limit the number of returned shards using the Limit parameter.
* The number of shards in a stream may be too large to return from a
* single call to DescribeStream. You can detect this by using the
* HasMoreShards flag in the returned output. HasMoreShards is set to
* true when there is more data available.
*
*
* If there are more shards available, you can request more shards by
* using the shard ID of the last shard returned by the DescribeStream
* request, in the ExclusiveStartShardId parameter in a subsequent
* request to DescribeStream.
*
* DescribeStream is a paginated operation.
*
*
* DescribeStream has a limit of 10 transactions per second per account.
*
*
* @param streamName The name of the stream to describe.
* @param exclusiveStartShardId The shard ID of the shard to start with
* for the stream description.
*
* @return The response from the DescribeStream service method, as
* returned by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeStreamResult describeStream(String streamName, String exclusiveStartShardId)
throws AmazonServiceException, AmazonClientException {
DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
describeStreamRequest.setStreamName(streamName);
describeStreamRequest.setExclusiveStartShardId(exclusiveStartShardId);
return describeStream(describeStreamRequest);
}
/**
*
* This operation returns the following information about the stream: the
* current status of the stream, the stream Amazon Resource Name (ARN),
* and an array of shard objects that comprise the stream. For each shard
* object there is information about the hash key and sequence number
* ranges that the shard spans, and the IDs of any earlier shards that
* played a role in a MergeShards or SplitShard operation that created
* the shard. A sequence number is the identifier associated with every
* record ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You can limit the number of returned shards using the Limit parameter.
* The number of shards in a stream may be too large to return from a
* single call to DescribeStream. You can detect this by using the
* HasMoreShards flag in the returned output. HasMoreShards is set to
* true when there is more data available.
*
*
* If there are more shards available, you can request more shards by
* using the shard ID of the last shard returned by the DescribeStream
* request, in the ExclusiveStartShardId parameter in a subsequent
* request to DescribeStream.
*
* DescribeStream is a paginated operation.
*
*
* DescribeStream has a limit of 10 transactions per second per account.
*
*
* @param streamName The name of the stream to describe.
* @param limit The maximum number of shards to return.
* @param exclusiveStartShardId The shard ID of the shard to start with
* for the stream description.
*
* @return The response from the DescribeStream service method, as
* returned by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public DescribeStreamResult describeStream(String streamName, Integer limit, String exclusiveStartShardId)
throws AmazonServiceException, AmazonClientException {
DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
describeStreamRequest.setStreamName(streamName);
describeStreamRequest.setLimit(limit);
describeStreamRequest.setExclusiveStartShardId(exclusiveStartShardId);
return describeStream(describeStreamRequest);
}
/**
*
* This operation returns a shard iterator in ShardIterator. The shard
* iterator specifies the position in the shard from which you want to
* start reading data records sequentially. A shard iterator specifies
* this position using the sequence number of a data record in a shard.
* A sequence number is the identifier associated with every record
* ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You must specify the shard iterator type in the GetShardIterator
* request. For example, you can set the ShardIteratorType parameter to
* read exactly from the position denoted by a specific sequence number
* by using the AT_SEQUENCE_NUMBER shard iterator type, or right after
* the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator
* type, using sequence numbers returned by earlier PutRecord, GetRecords
* or DescribeStream requests. You can specify the shard iterator type
* TRIM_HORIZON in the request to cause ShardIterator to point to the
* last untrimmed record in the shard in the system, which is the oldest
* data record in the shard. Or you can point to just after the most
* recent record in the shard, by using the shard iterator type LATEST,
* so that you always read the most recent data in the shard.
*
*
* Note: Each shard iterator expires five minutes after it is
* returned to the requester.
*
*
* When you repeatedly read from an Amazon Kinesis stream, use a
* GetShardIterator request to get the first shard iterator to use in
* your first GetRecords request and then use the shard iterator returned
* by the GetRecords request in NextShardIterator for subsequent reads.
* A new shard iterator is returned by every GetRecords request in
* NextShardIterator, which you use in the ShardIterator parameter of
* the next GetRecords request.
*
*
* If a GetShardIterator request is made too often, you will receive a
* ProvisionedThroughputExceededException. For more information about
* throughput limits, see the Amazon Kinesis Developer Guide.
*
*
* GetShardIterator can return null for its ShardIterator to indicate
* that the shard has been closed and that the requested iterator will
* return no more data. A shard can be closed by a SplitShard or
* MergeShards operation.
*
*
* GetShardIterator has a limit of 5 transactions per second per account
* per open shard.
*
*
* @param streamName The name of the stream.
* @param shardId The shard ID of the shard to get the iterator for.
* @param shardIteratorType Determines how the shard iterator is used to
* start reading data records from the shard. The following are the
* valid shard iterator types:
* - AT_SEQUENCE_NUMBER - Start reading exactly from the position
* denoted by a specific sequence number.
* - AFTER_SEQUENCE_NUMBER - Start reading right after the position
* denoted by a specific sequence number.
* - TRIM_HORIZON - Start reading at the last untrimmed record in the
* shard in the system, which is the oldest data record in the shard.
* - LATEST - Start reading just after the most recent record in the
* shard, so that you always read the most recent data in the shard.
*
* @return The response from the GetShardIterator service method, as
* returned by AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public GetShardIteratorResult getShardIterator(String streamName, String shardId, String shardIteratorType)
throws AmazonServiceException, AmazonClientException {
GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
getShardIteratorRequest.setStreamName(streamName);
getShardIteratorRequest.setShardId(shardId);
getShardIteratorRequest.setShardIteratorType(shardIteratorType);
return getShardIterator(getShardIteratorRequest);
}
/**
*
* This operation returns a shard iterator in ShardIterator. The shard
* iterator specifies the position in the shard from which you want to
* start reading data records sequentially. A shard iterator specifies
* this position using the sequence number of a data record in a shard.
* A sequence number is the identifier associated with every record
* ingested in the Amazon Kinesis stream. The sequence number is
* assigned by the Amazon Kinesis service when a record is put into the
* stream.
*
*
* You must specify the shard iterator type in the GetShardIterator
* request. For example, you can set the ShardIteratorType parameter to
* read exactly from the position denoted by a specific sequence number
* by using the AT_SEQUENCE_NUMBER shard iterator type, or right after
* the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator
* type, using sequence numbers returned by earlier PutRecord, GetRecords
* or DescribeStream requests. You can specify the shard iterator type
* TRIM_HORIZON in the request to cause ShardIterator to point to the
* last untrimmed record in the shard in the system, which is the oldest
* data record in the shard. Or you can point to just after the most
* recent record in the shard, by using the shard iterator type LATEST,
* so that you always read the most recent data in the shard.
*
*
* Note: Each shard iterator expires five minutes after it is
* returned to the requester.
*
*
* When you repeatedly read from an Amazon Kinesis stream, use a
* GetShardIterator request to get the first shard iterator to use in
* your first GetRecords request and then use the shard iterator returned
* by the GetRecords request in NextShardIterator for subsequent reads.
* A new shard iterator is returned by every GetRecords request in
* NextShardIterator, which you use in the ShardIterator parameter of
* the next GetRecords request.
*
*
* If a GetShardIterator request is made too often, you will receive a
* ProvisionedThroughputExceededException. For more information about
* throughput limits, see the Amazon Kinesis Developer Guide.
*
*
* GetShardIterator can return null for its ShardIterator to indicate
* that the shard has been closed and that the requested iterator will
* return no more data. A shard can be closed by a SplitShard or
* MergeShards operation.
*
*
* GetShardIterator has a limit of 5 transactions per second per account
* per open shard.
*
*
* @param streamName The name of the stream.
* @param shardId The shard ID of the shard to get the iterator for.
* @param shardIteratorType Determines how the shard iterator is used to
* start reading data records from the shard. The following are the
* valid shard iterator types:
* - AT_SEQUENCE_NUMBER - Start reading exactly from the position
* denoted by a specific sequence number.
* - AFTER_SEQUENCE_NUMBER - Start reading right after the position
* denoted by a specific sequence number.
* - TRIM_HORIZON - Start reading at the last untrimmed record in the
* shard in the system, which is the oldest data record in the shard.
* - LATEST - Start reading just after the most recent record in the
* shard, so that you always read the most recent data in the shard.
* @param startingSequenceNumber The sequence number of the data record
* in the shard from which to start reading.
*
* @return The response from the GetShardIterator service method, as
* returned by AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public GetShardIteratorResult getShardIterator(String streamName, String shardId, String shardIteratorType, String startingSequenceNumber)
throws AmazonServiceException, AmazonClientException {
GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
getShardIteratorRequest.setStreamName(streamName);
getShardIteratorRequest.setShardId(shardId);
getShardIteratorRequest.setShardIteratorType(shardIteratorType);
getShardIteratorRequest.setStartingSequenceNumber(startingSequenceNumber);
return getShardIterator(getShardIteratorRequest);
}
/**
*
* This operation puts a data record into an Amazon Kinesis stream from a
* producer. This operation must be called to send data from the producer
* into the Amazon Kinesis stream for real-time ingestion and subsequent
* processing. The PutRecord
operation requires the name of
* the stream that captures, stores, and transports the data; a partition
* key; and the data blob itself. The data blob could be a segment from a
* log file, geographic/location data, website clickstream data, or any
* other data type.
*
*
* The partition key is used to distribute data across shards. Amazon
* Kinesis segregates the data records that belong to a data stream into
* multiple shards, using the partition key associated with each data
* record to determine which shard a given data record belongs to.
*
*
* Partition keys are Unicode strings, with a maximum length limit of 256
* bytes. An MD5 hash function is used to map partition keys to 128-bit
* integer values and to map associated data records to shards using the
* hash key ranges of the shards. You can override hashing the partition
* key to determine the shard by explicitly specifying a hash value using
* the ExplicitHashKey parameter. For more information, see the
* Amazon Kinesis Developer Guide.
*
*
* PutRecord returns the shard ID where the data record
* was placed and the sequence number that was assigned to the data
* record.
*
*
* Sequence numbers generally increase over time. To guarantee strictly
* increasing ordering, use the SequenceNumberForOrdering
* parameter. For more information, see the
* Amazon Kinesis Developer Guide.
*
*
* If a PutRecord request cannot be processed because of
* insufficient provisioned throughput on the shard involved in the
* request, PutRecord throws
* ProvisionedThroughputExceededException.
*
*
* Data records are accessible for only 24 hours from the time that they
* are added to an Amazon Kinesis stream.
*
*
* @param streamName The name of the stream to put the data record into.
* @param data The data blob to put into the record, which is
* Base64-encoded when the blob is serialized. The maximum size of the
* data blob (the payload after Base64-decoding) is 50 kilobytes (KB).
* @param partitionKey Determines which shard in the stream the data
* record is assigned to. Partition keys are Unicode strings with a
* maximum length limit of 256 bytes. Amazon Kinesis uses the partition
* key as input to a hash function that maps the partition key and
* associated data to a specific shard. Specifically, an MD5 hash
* function is used to map partition keys to 128-bit integer values and
* to map associated data records to shards. As a result of this hashing
* mechanism, all data records with the same partition key will map to
* the same shard within the stream.
*
* @return The response from the PutRecord service method, as returned by
* AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public PutRecordResult putRecord(String streamName, java.nio.ByteBuffer data, String partitionKey)
throws AmazonServiceException, AmazonClientException {
PutRecordRequest putRecordRequest = new PutRecordRequest();
putRecordRequest.setStreamName(streamName);
putRecordRequest.setData(data);
putRecordRequest.setPartitionKey(partitionKey);
return putRecord(putRecordRequest);
}
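/*
 * Usage sketch (not part of the SDK): putting a single record with the convenience
 * overload above. The stream name, payload, and partition key are illustrative
 * placeholders.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   java.nio.ByteBuffer data = java.nio.ByteBuffer.wrap(
 *       "some-payload".getBytes(java.nio.charset.Charset.forName("UTF-8")));
 *   PutRecordResult result = kinesis.putRecord("myStream", data, "myPartitionKey");
 *   // The shard the record landed in and the sequence number it was assigned:
 *   String shardId = result.getShardId();
 *   String sequenceNumber = result.getSequenceNumber();
 */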
/**
*
* This operation puts a data record into an Amazon Kinesis stream from a
* producer. This operation must be called to send data from the producer
* into the Amazon Kinesis stream for real-time ingestion and subsequent
* processing. The PutRecord operation requires the name of
* the stream that captures, stores, and transports the data; a partition
* key; and the data blob itself. The data blob could be a segment from a
* log file, geographic/location data, website clickstream data, or any
* other data type.
*
*
* The partition key is used to distribute data across shards. Amazon
* Kinesis segregates the data records that belong to a data stream into
* multiple shards, using the partition key associated with each data
* record to determine which shard a given data record belongs to.
*
*
* Partition keys are Unicode strings, with a maximum length limit of 256
* bytes. An MD5 hash function is used to map partition keys to 128-bit
* integer values and to map associated data records to shards using the
* hash key ranges of the shards. You can override hashing the partition
* key to determine the shard by explicitly specifying a hash value using
* the ExplicitHashKey parameter. For more information, see the
* Amazon Kinesis Developer Guide.
*
*
* PutRecord returns the shard ID where the data record
* was placed and the sequence number that was assigned to the data
* record.
*
*
* Sequence numbers generally increase over time. To guarantee strictly
* increasing ordering, use the SequenceNumberForOrdering
* parameter. For more information, see the
* Amazon Kinesis Developer Guide.
*
*
* If a PutRecord request cannot be processed because of
* insufficient provisioned throughput on the shard involved in the
* request, PutRecord throws
* ProvisionedThroughputExceededException.
*
*
* Data records are accessible for only 24 hours from the time that they
* are added to an Amazon Kinesis stream.
*
*
* @param streamName The name of the stream to put the data record into.
* @param data The data blob to put into the record, which is
* Base64-encoded when the blob is serialized. The maximum size of the
* data blob (the payload after Base64-decoding) is 50 kilobytes (KB).
* @param partitionKey Determines which shard in the stream the data
* record is assigned to. Partition keys are Unicode strings with a
* maximum length limit of 256 bytes. Amazon Kinesis uses the partition
* key as input to a hash function that maps the partition key and
* associated data to a specific shard. Specifically, an MD5 hash
* function is used to map partition keys to 128-bit integer values and
* to map associated data records to shards. As a result of this hashing
* mechanism, all data records with the same partition key will map to
* the same shard within the stream.
* @param sequenceNumberForOrdering Guarantees strictly increasing
* sequence numbers for puts from the same client and to the same
* partition key. Usage: set the SequenceNumberForOrdering
* of record n to the sequence number of record n-1 (as
* returned in the PutRecordResult when putting record
* n-1). If this parameter is not set, records will be coarsely
* ordered based on arrival time.
*
* @return The response from the PutRecord service method, as returned by
* AmazonKinesis.
*
* @throws ProvisionedThroughputExceededException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public PutRecordResult putRecord(String streamName, java.nio.ByteBuffer data, String partitionKey, String sequenceNumberForOrdering)
throws AmazonServiceException, AmazonClientException {
PutRecordRequest putRecordRequest = new PutRecordRequest();
putRecordRequest.setStreamName(streamName);
putRecordRequest.setData(data);
putRecordRequest.setPartitionKey(partitionKey);
putRecordRequest.setSequenceNumberForOrdering(sequenceNumberForOrdering);
return putRecord(putRecordRequest);
}
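/*
 * Usage sketch (not part of the SDK): chaining SequenceNumberForOrdering so that puts
 * from this client to the same partition key get strictly increasing sequence numbers,
 * as described above. The stream name, partition key, and the "payloads" collection are
 * illustrative placeholders.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   String lastSequenceNumber = null;
 *   for (java.nio.ByteBuffer payload : payloads) {
 *       PutRecordResult result =
 *           kinesis.putRecord("myStream", payload, "myPartitionKey", lastSequenceNumber);
 *       // Feed record n-1's sequence number into record n's request.
 *       lastSequenceNumber = result.getSequenceNumber();
 *   }
 */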
/**
*
* This operation splits a shard into two new shards in the stream, to
* increase the stream's capacity to ingest and transport data.
* SplitShard is called when there is a need to increase the
* overall capacity of a stream because of an expected increase in the
* volume of data records being ingested.
*
*
* SplitShard can also be used when a given shard appears
* to be approaching its maximum utilization, for example, when the set
* of producers sending data into the specific shard are suddenly sending
* more than previously anticipated. You can also call the
* SplitShard operation to increase stream capacity, so that
* more Amazon Kinesis applications can simultaneously read data from the
* stream for real-time processing.
*
*
* The SplitShard operation requires that you specify the
* shard to be split and the new hash key, which is the position in the
* shard where the shard gets split in two. In many cases, the new hash
* key might simply be the average of the beginning and ending hash key,
* but it can be any hash key value in the range being mapped into the
* shard. For more information about splitting shards, see the
* Amazon Kinesis Developer Guide.
*
*
* You can use the DescribeStream operation to determine the shard ID and
* hash key values for the ShardToSplit and
* NewStartingHashKey parameters that are specified in the
* SplitShard request.
*
*
* SplitShard is an asynchronous operation. Upon receiving
* a SplitShard request, Amazon Kinesis immediately returns
* a response and sets the stream status to UPDATING. After the operation
* is completed, Amazon Kinesis sets the stream status to ACTIVE. Read
* and write operations continue to work while the stream is in the
* UPDATING state.
*
*
* You can use DescribeStream to check the status of the
* stream, which is returned in StreamStatus.
* If the stream is in the ACTIVE state, you can call SplitShard.
* If a stream is in the CREATING, UPDATING, or DELETING state,
* Amazon Kinesis returns a ResourceInUseException.
*
*
* If the specified stream does not exist, Amazon Kinesis returns a
* ResourceNotFoundException.
* If you try to create more shards than are authorized
* for your account, you receive a LimitExceededException.
*
*
* Note: The default limit for an AWS account is 10 shards per
* stream. If you need to create a stream with more than 10 shards,
* contact AWS Support
* to increase the limit on your account.
*
*
* If you try to operate on too many streams in parallel using
* CreateStream, DeleteStream, MergeShards or SplitShard, you will
* receive a LimitExceededException.
*
*
* SplitShard has a limit of 5 transactions per second per
* account.
*
*
* @param streamName The name of the stream for the shard split.
* @param shardToSplit The shard ID of the shard to split.
* @param newStartingHashKey A hash key value for the starting hash key
* of one of the child shards created by the split. The hash key range
* for a given shard constitutes a set of ordered contiguous positive
* integers. The value for NewStartingHashKey must be in the
* range of hash keys being mapped into the shard. The
* NewStartingHashKey hash key value and all higher hash key
* values in the hash key range are distributed to one of the child
* shards. All the lower hash key values in the range are distributed to
* the other child shard.
*
* @return The response from the SplitShard service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceInUseException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void splitShard(String streamName, String shardToSplit, String newStartingHashKey)
throws AmazonServiceException, AmazonClientException {
SplitShardRequest splitShardRequest = new SplitShardRequest();
splitShardRequest.setStreamName(streamName);
splitShardRequest.setShardToSplit(shardToSplit);
splitShardRequest.setNewStartingHashKey(newStartingHashKey);
splitShard(splitShardRequest);
}
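/*
 * Usage sketch (not part of the SDK): splitting a shard at the midpoint of its hash key
 * range, looking the range up with DescribeStream as described above. The stream name
 * and the choice of the first shard are illustrative placeholders.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   Shard shard = kinesis.describeStream("myStream")
 *       .getStreamDescription().getShards().get(0);
 *   java.math.BigInteger start =
 *       new java.math.BigInteger(shard.getHashKeyRange().getStartingHashKey());
 *   java.math.BigInteger end =
 *       new java.math.BigInteger(shard.getHashKeyRange().getEndingHashKey());
 *   // The average of the beginning and ending hash key is a common choice.
 *   String newStartingHashKey = start.add(end).divide(java.math.BigInteger.valueOf(2)).toString();
 *   kinesis.splitShard("myStream", shard.getShardId(), newStartingHashKey);
 */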
/**
*
* This operation adds a new Amazon Kinesis stream to your AWS account. A
* stream captures and transports data records that are continuously
* emitted from different data sources or producers.
* Scale-out within an Amazon Kinesis stream is explicitly
* supported by means of shards, which are uniquely identified groups of
* data records in an Amazon Kinesis stream.
*
*
* You specify and control the number of shards that a stream is composed
* of. Each open shard can support up to 5 read transactions per second,
* up to a maximum total of 2 MB of data read per second. Each shard can
* support up to 1000 write transactions per second, up to a maximum
* total of 1 MB data written per second. You can add shards to a stream
* if the amount of data input increases and you can remove shards if the
* amount of data input decreases.
*
*
* The stream name identifies the stream. The name is scoped to the AWS
* account used by the application. It is also scoped by region. That is,
* two streams in two different accounts can have the same name, and two
* streams in the same account, but in two different regions, can have
* the same name.
*
*
* CreateStream is an asynchronous operation. Upon
* receiving a CreateStream request, Amazon Kinesis
* immediately returns and sets the stream status to CREATING. After the
* stream is created, Amazon Kinesis sets the stream status to ACTIVE.
* You should perform read and write operations only on an ACTIVE stream.
*
*
* You receive a LimitExceededException when making a
* CreateStream request if you try to do one of the
* following:
*
*
*
* - Have more than five streams in the CREATING state at any point in
* time.
* - Create more shards than are authorized for your account.
*
*
*
* Note: The default limit for an AWS account is 10 shards per
* stream. If you need to create a stream with more than 10 shards,
* contact AWS Support
* to increase the limit on your account.
*
*
* You can use the DescribeStream operation to check the
* stream status, which is returned in StreamStatus.
*
*
* CreateStream has a limit of 5 transactions per second
* per account.
*
*
* @param streamName A name to identify the stream. The stream name is
* scoped to the AWS account used by the application that creates the
* stream. It is also scoped by region. That is, two streams in two
* different AWS accounts can have the same name, and two streams in the
* same AWS account, but in two different regions, can have the same
* name.
* @param shardCount The number of shards that the stream will use. The
* throughput of the stream is a function of the number of shards; more
* shards are required for greater provisioned throughput.
* Note: The default limit for an AWS account is 10 shards per
* stream. If you need to create a stream with more than 10 shards, contact
* AWS Support to increase the limit on your account.
*
* @return The response from the CreateStream service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceInUseException
* @throws InvalidArgumentException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void createStream(String streamName, Integer shardCount)
throws AmazonServiceException, AmazonClientException {
CreateStreamRequest createStreamRequest = new CreateStreamRequest();
createStreamRequest.setStreamName(streamName);
createStreamRequest.setShardCount(shardCount);
createStream(createStreamRequest);
}
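/*
 * Usage sketch (not part of the SDK): creating a stream and polling DescribeStream until
 * the status becomes ACTIVE, as described above. The stream name, shard count, and
 * polling interval are illustrative placeholders; Thread.sleep throws
 * InterruptedException, which the caller would need to handle or declare.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   kinesis.createStream("myStream", 2);
 *   String status;
 *   do {
 *       Thread.sleep(10 * 1000);
 *       status = kinesis.describeStream("myStream")
 *           .getStreamDescription().getStreamStatus();
 *   } while (!"ACTIVE".equals(status));
 */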
/**
*
* This operation deletes a stream and all of its shards and data. You
* must shut down any applications that are operating on the stream
* before you delete the stream. If an application attempts to operate on
* a deleted stream, it will receive the exception
* ResourceNotFoundException.
*
*
* If the stream is in the ACTIVE state, you can delete it. After a
* DeleteStream request, the specified stream is in the
* DELETING state until Amazon Kinesis completes the deletion.
*
*
* Note: Amazon Kinesis might continue to accept data read and
* write operations, such as PutRecord and GetRecords, on a stream in the
* DELETING state until the stream deletion is complete.
*
*
* When you delete a stream, any shards in that stream are also deleted.
*
*
* You can use the DescribeStream operation to check the state of the
* stream, which is returned in StreamStatus.
*
*
* DeleteStream has a limit of 5 transactions per second
* per account.
*
*
* @param streamName The name of the stream to delete.
*
* @return The response from the DeleteStream service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void deleteStream(String streamName)
throws AmazonServiceException, AmazonClientException {
DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
deleteStreamRequest.setStreamName(streamName);
deleteStream(deleteStreamRequest);
}
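/*
 * Usage sketch (not part of the SDK): deleting a stream and waiting until the deletion
 * completes, signalled by ResourceNotFoundException from DescribeStream, as described
 * above. The stream name and polling interval are illustrative placeholders.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   kinesis.deleteStream("myStream");
 *   try {
 *       while (true) {
 *           kinesis.describeStream("myStream");   // still in the DELETING state
 *           Thread.sleep(10 * 1000);
 *       }
 *   } catch (ResourceNotFoundException deleted) {
 *       // The stream and all of its shards have been deleted.
 *   }
 */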
/**
*
* This operation returns an array of the names of all the streams that
* are associated with the AWS account making the
* ListStreams request. A given AWS account can have many
* streams active at one time.
*
*
* The number of streams may be too large to return from a single call
* to ListStreams. You can limit the number of returned streams
* using the Limit parameter. If you do not specify a value for
* the Limit parameter, Amazon Kinesis uses the default limit,
* which is currently 10.
*
*
* You can detect if there are more streams available to list by using
* the HasMoreStreams flag from the returned output. If
* there are more streams available, you can request more streams by
* using the name of the last stream returned by the
* ListStreams request in the ExclusiveStartStreamName
* parameter in a subsequent request to ListStreams.
* The group of stream names returned by the subsequent
* request is then added to the list. You can continue this process until
* all the stream names have been collected in the list.
*
*
* ListStreams has a limit of 5 transactions per second per
* account.
*
*
* @param exclusiveStartStreamName The name of the stream to start the
* list with.
*
* @return The response from the ListStreams service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public ListStreamsResult listStreams(String exclusiveStartStreamName)
throws AmazonServiceException, AmazonClientException {
ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
listStreamsRequest.setExclusiveStartStreamName(exclusiveStartStreamName);
return listStreams(listStreamsRequest);
}
/**
*
* This operation returns an array of the names of all the streams that
* are associated with the AWS account making the
* ListStreams request. A given AWS account can have many
* streams active at one time.
*
*
* The number of streams may be too large to return from a single call
* to ListStreams. You can limit the number of returned streams
* using the Limit parameter. If you do not specify a value for
* the Limit parameter, Amazon Kinesis uses the default limit,
* which is currently 10.
*
*
* You can detect if there are more streams available to list by using
* the HasMoreStreams flag from the returned output. If
* there are more streams available, you can request more streams by
* using the name of the last stream returned by the
* ListStreams request in the ExclusiveStartStreamName
* parameter in a subsequent request to ListStreams.
* The group of stream names returned by the subsequent
* request is then added to the list. You can continue this process until
* all the stream names have been collected in the list.
*
*
* ListStreams has a limit of 5 transactions per second per
* account.
*
*
* @param limit The maximum number of streams to list.
* @param exclusiveStartStreamName The name of the stream to start the
* list with.
*
* @return The response from the ListStreams service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public ListStreamsResult listStreams(Integer limit, String exclusiveStartStreamName)
throws AmazonServiceException, AmazonClientException {
ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
listStreamsRequest.setLimit(limit);
listStreamsRequest.setExclusiveStartStreamName(exclusiveStartStreamName);
return listStreams(listStreamsRequest);
}
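/*
 * Usage sketch (not part of the SDK): collecting all stream names by paging with
 * HasMoreStreams and ExclusiveStartStreamName, as described above. The page size of 10
 * is an illustrative choice.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   List<String> streamNames = new ArrayList<String>();
 *   String exclusiveStartStreamName = null;
 *   ListStreamsResult page;
 *   do {
 *       page = kinesis.listStreams(10, exclusiveStartStreamName);
 *       streamNames.addAll(page.getStreamNames());
 *       if (!page.getStreamNames().isEmpty()) {
 *           // Start the next page after the last stream name returned.
 *           exclusiveStartStreamName = page.getStreamNames().get(page.getStreamNames().size() - 1);
 *       }
 *   } while (page.getHasMoreStreams());
 */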
/**
*
* This operation merges two adjacent shards in a stream and combines
* them into a single shard to reduce the stream's capacity to ingest and
* transport data. Two shards are considered adjacent if the union of the
* hash key ranges for the two shards form a contiguous set with no gaps.
* For example, if you have two shards, one with a hash key range of
* 276...381 and the other with a hash key range of 382...454, then you
* could merge these two shards into a single shard that would have a
* hash key range of 276...454. After the merge, the single child shard
* receives data for all hash key values covered by the two parent
* shards.
*
*
* MergeShards is called when there is a need to reduce the
* overall capacity of a stream because of excess capacity that is not
* being used. The operation requires that you specify the shard to be
* merged and the adjacent shard for a given stream. For more information
* about merging shards, see the
* Amazon Kinesis Developer Guide.
*
*
* If the stream is in the ACTIVE state, you can call
* MergeShards. If a stream is in the CREATING, UPDATING, or
* DELETING state, Amazon Kinesis returns a
* ResourceInUseException.
* If the specified stream does not exist, Amazon Kinesis
* returns a ResourceNotFoundException.
*
*
* You can use the DescribeStream operation to check the state of the
* stream, which is returned in StreamStatus.
*
*
* MergeShards is an asynchronous operation. Upon receiving
* a MergeShards request, Amazon Kinesis immediately returns
* a response and sets the StreamStatus to UPDATING. After
* the operation is completed, Amazon Kinesis sets the
* StreamStatus to ACTIVE. Read and write operations
* continue to work while the stream is in the UPDATING state.
*
*
* You use the DescribeStream operation to determine the shard IDs that
* are specified in the MergeShards request.
*
*
* If you try to operate on too many streams in parallel using
* CreateStream, DeleteStream, MergeShards, or SplitShard,
* you will receive a LimitExceededException.
*
*
* MergeShards has a limit of 5 transactions per second per
* account.
*
*
* @param streamName The name of the stream for the merge.
* @param shardToMerge The shard ID of the shard to combine with the
* adjacent shard for the merge.
* @param adjacentShardToMerge The shard ID of the adjacent shard for the
* merge.
*
* @return The response from the MergeShards service method, as returned
* by AmazonKinesis.
*
* @throws LimitExceededException
* @throws ResourceInUseException
* @throws InvalidArgumentException
* @throws ResourceNotFoundException
*
* @throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response. For example
* if a network connection is not available.
* @throws AmazonServiceException
* If an error response is returned by AmazonKinesis indicating
* either a problem with the data in the request, or a server side issue.
*/
public void mergeShards(String streamName, String shardToMerge, String adjacentShardToMerge)
throws AmazonServiceException, AmazonClientException {
MergeShardsRequest mergeShardsRequest = new MergeShardsRequest();
mergeShardsRequest.setStreamName(streamName);
mergeShardsRequest.setShardToMerge(shardToMerge);
mergeShardsRequest.setAdjacentShardToMerge(adjacentShardToMerge);
mergeShards(mergeShardsRequest);
}
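/*
 * Usage sketch (not part of the SDK): merging two shards whose IDs were looked up with
 * DescribeStream, as described above. The stream name and the choice of the first two
 * shards are illustrative placeholders; the two shards' hash key ranges must be
 * contiguous for the merge to succeed.
 *
 *   AmazonKinesis kinesis = new AmazonKinesisClient();
 *   List<Shard> shards = kinesis.describeStream("myStream")
 *       .getStreamDescription().getShards();
 *   kinesis.mergeShards("myStream",
 *       shards.get(0).getShardId(),     // ShardToMerge
 *       shards.get(1).getShardId());    // AdjacentShardToMerge
 */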
@Override
public void setEndpoint(String endpoint) {
super.setEndpoint(endpoint);
}
@Override
public void setEndpoint(String endpoint, String serviceName, String regionId) throws IllegalArgumentException {
super.setEndpoint(endpoint, serviceName, regionId);
}
/**
* Returns additional metadata for a previously executed successful request, typically used for
* debugging issues where a service isn't acting as expected. This data isn't considered part
* of the result data returned by an operation, so it's available through this separate,
* diagnostic interface.
*
* Response metadata is only cached for a limited period of time, so if you need to access
* this extra diagnostic information for an executed request, you should use this method
* to retrieve it as soon as possible after executing the request.
*
* @param request
* The originally executed request
*
* @return The response metadata for the specified request, or null if none
* is available.
*/
public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
return client.getResponseMetadataForRequest(request);
}
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request,
        HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
        ExecutionContext executionContext) {
request.setEndpoint(endpoint);
request.setTimeOffset(timeOffset);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
AWSCredentials credentials;
awsRequestMetrics.startEvent(Field.CredentialsRequestTime);
try {
credentials = awsCredentialsProvider.getCredentials();
} finally {
awsRequestMetrics.endEvent(Field.CredentialsRequestTime);
}
AmazonWebServiceRequest originalRequest = request.getOriginalRequest();
if (originalRequest != null && originalRequest.getRequestCredentials() != null) {
credentials = originalRequest.getRequestCredentials();
}
executionContext.setCredentials(credentials);
JsonErrorResponseHandler errorResponseHandler = new JsonErrorResponseHandler(jsonErrorUnmarshallers);
Response<X> result = client.execute(request, responseHandler,
        errorResponseHandler, executionContext);
return result;
}
}