/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.dynamodbv2;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import javax.annotation.Generated;
import org.apache.commons.logging.*;
import com.amazonaws.services.dynamodbv2.endpointdiscovery.AmazonDynamoDBEndpointCache;
import com.amazonaws.*;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.client.builder.AdvancedConfig;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.waiters.AmazonDynamoDBWaiters;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.model.*;
import com.amazonaws.services.dynamodbv2.model.transform.*;
/**
* Client for accessing DynamoDB. All service calls made using this client are blocking, and will not return until the
* service call completes.
*
* Amazon DynamoDB
*
* Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with
* seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed
* database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software
* patching, or cluster scaling.
*
*
* With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of
* request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance
* degradation, and use the AWS Management Console to monitor resource utilization and performance metrics.
*
*
* DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle
* your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is
* stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an AWS region,
* providing built-in high availability and data durability.
*
*/
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonDynamoDBClient extends AmazonWebServiceClient implements AmazonDynamoDB {
// register the service specific set of predefined metrics
static {
AwsSdkMetrics.addAll(Arrays.asList(com.amazonaws.services.dynamodbv2.metrics.DynamoDBRequestMetric.values()));
}
protected AmazonDynamoDBEndpointCache cache;
private final boolean endpointDiscoveryEnabled;
/** Provider for AWS credentials. */
private final AWSCredentialsProvider awsCredentialsProvider;
private static final Log log = LogFactory.getLog(AmazonDynamoDB.class);
/** Default signing name for the service. */
private static final String DEFAULT_SIGNING_NAME = "dynamodb";
private volatile AmazonDynamoDBWaiters waiters;
/** Client configuration factory providing ClientConfigurations tailored to this client */
protected static final com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientConfigurationFactory configFactory = new com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientConfigurationFactory();
private final AdvancedConfig advancedConfig;
private static final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory = new com.amazonaws.protocol.json.SdkJsonProtocolFactory(
new JsonClientMetadata()
.withProtocolVersion("1.0")
.withSupportsCbor(false)
.withSupportsIon(false)
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("RequestLimitExceeded").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.RequestLimitExceededExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("GlobalTableAlreadyExistsException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.GlobalTableAlreadyExistsExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ConditionalCheckFailedException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ConditionalCheckFailedExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("LimitExceededException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.LimitExceededExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("GlobalTableNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.GlobalTableNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ItemCollectionSizeLimitExceededException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ItemCollectionSizeLimitExceededExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ReplicaNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ReplicaNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("BackupInUseException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.BackupInUseExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ResourceNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ResourceNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ContinuousBackupsUnavailableException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ContinuousBackupsUnavailableExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("IdempotentParameterMismatchException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.IdempotentParameterMismatchExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ExportNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ExportNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("TransactionInProgressException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.TransactionInProgressExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("TableInUseException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.TableInUseExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ProvisionedThroughputExceededException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ProvisionedThroughputExceededExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("PointInTimeRecoveryUnavailableException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.PointInTimeRecoveryUnavailableExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ResourceInUseException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ResourceInUseExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("TableAlreadyExistsException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.TableAlreadyExistsExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ExportConflictException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ExportConflictExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("TransactionConflictException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.TransactionConflictExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("InvalidRestoreTimeException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.InvalidRestoreTimeExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ReplicaAlreadyExistsException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.ReplicaAlreadyExistsExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("BackupNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.BackupNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("IndexNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.IndexNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("TableNotFoundException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.TableNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("DuplicateItemException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.DuplicateItemExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("TransactionCanceledException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.TransactionCanceledExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("InvalidExportTimeException").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.InvalidExportTimeExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("InternalServerError").withExceptionUnmarshaller(
com.amazonaws.services.dynamodbv2.model.transform.InternalServerErrorExceptionUnmarshaller.getInstance()))
.withBaseServiceExceptionClass(com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException.class));
/**
* Constructs a new client to invoke service methods on DynamoDB. A credentials provider chain will be used that
* searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @see DefaultAWSCredentialsProviderChain
* @deprecated use {@link AmazonDynamoDBClientBuilder#defaultClient()}
*/
@Deprecated
public AmazonDynamoDBClient() {
this(DefaultAWSCredentialsProviderChain.getInstance(), configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on DynamoDB. A credentials provider chain will be used that
* searches for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance profile credentials delivered through the Amazon EC2 metadata service
*
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientConfiguration
* The client configuration options controlling how this client connects to DynamoDB (ex: proxy settings,
* retry counts, etc.).
*
* @see DefaultAWSCredentialsProviderChain
* @deprecated use {@link AmazonDynamoDBClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonDynamoDBClient(ClientConfiguration clientConfiguration) {
this(DefaultAWSCredentialsProviderChain.getInstance(), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified AWS account credentials.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentials
* The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
* @deprecated use {@link AmazonDynamoDBClientBuilder#withCredentials(AWSCredentialsProvider)} for example:
* {@code AmazonDynamoDBClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(awsCredentials)).build();}
*/
@Deprecated
public AmazonDynamoDBClient(AWSCredentials awsCredentials) {
this(awsCredentials, configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified AWS account credentials and
* client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentials
* The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to DynamoDB (ex: proxy settings,
* retry counts, etc.).
* @deprecated use {@link AmazonDynamoDBClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonDynamoDBClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonDynamoDBClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
super(clientConfiguration);
this.endpointDiscoveryEnabled = false;
this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
this.advancedConfig = AdvancedConfig.EMPTY;
init();
}
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified AWS account credentials
* provider.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @deprecated use {@link AmazonDynamoDBClientBuilder#withCredentials(AWSCredentialsProvider)}
*/
@Deprecated
public AmazonDynamoDBClient(AWSCredentialsProvider awsCredentialsProvider) {
this(awsCredentialsProvider, configFactory.getConfig());
}
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified AWS account credentials
* provider and client configuration options.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to DynamoDB (ex: proxy settings,
* retry counts, etc.).
* @deprecated use {@link AmazonDynamoDBClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonDynamoDBClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonDynamoDBClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
this(awsCredentialsProvider, clientConfiguration, null);
}
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified AWS account credentials
* provider, client configuration options, and request metric collector.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param awsCredentialsProvider
* The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client connects to DynamoDB (ex: proxy settings,
* retry counts, etc.).
* @param requestMetricCollector
* optional request metric collector
* @deprecated use {@link AmazonDynamoDBClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonDynamoDBClientBuilder#withClientConfiguration(ClientConfiguration)} and
* {@link AmazonDynamoDBClientBuilder#withMetricsCollector(RequestMetricCollector)}
*/
@Deprecated
public AmazonDynamoDBClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
super(clientConfiguration, requestMetricCollector);
this.awsCredentialsProvider = awsCredentialsProvider;
this.endpointDiscoveryEnabled = false;
this.advancedConfig = AdvancedConfig.EMPTY;
init();
}
public static AmazonDynamoDBClientBuilder builder() {
return AmazonDynamoDBClientBuilder.standard();
}
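/*
 * Illustrative sketch (not part of the generated client): the deprecated constructors above are replaced by the
 * builder returned from builder()/AmazonDynamoDBClientBuilder.standard(). The region and retry setting below are
 * assumptions chosen for the example.
 *
 *   AmazonDynamoDB client = AmazonDynamoDBClientBuilder.standard()
 *           .withRegion(Regions.US_WEST_2)
 *           .withClientConfiguration(new ClientConfiguration().withMaxErrorRetry(5))
 *           .build();
 */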
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonDynamoDBClient(AwsSyncClientParams clientParams) {
this(clientParams, false);
}
/**
* Constructs a new client to invoke service methods on DynamoDB using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonDynamoDBClient(AwsSyncClientParams clientParams, boolean endpointDiscoveryEnabled) {
super(clientParams);
this.awsCredentialsProvider = clientParams.getCredentialsProvider();
this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
this.advancedConfig = clientParams.getAdvancedConfig();
init();
}
private void init() {
if (endpointDiscoveryEnabled) {
cache = new AmazonDynamoDBEndpointCache(this);
}
setServiceNameIntern(DEFAULT_SIGNING_NAME);
setEndpointPrefix(ENDPOINT_PREFIX);
// calling this.setEndPoint(...) will also modify the signer accordingly
setEndpoint("https://dynamodb.us-east-1.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/dynamodbv2/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/dynamodbv2/request.handler2s"));
requestHandler2s.addAll(chainFactory.getGlobalHandlers());
}
/**
*
* This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL.
*
*
* @param batchExecuteStatementRequest
* @return Result of the BatchExecuteStatement operation returned by the service.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.BatchExecuteStatement
* @see AWS API Documentation
*/
@Override
public BatchExecuteStatementResult batchExecuteStatement(BatchExecuteStatementRequest request) {
request = beforeClientExecution(request);
return executeBatchExecuteStatement(request);
}
@SdkInternalApi
final BatchExecuteStatementResult executeBatchExecuteStatement(BatchExecuteStatementRequest batchExecuteStatementRequest) {
ExecutionContext executionContext = createExecutionContext(batchExecuteStatementRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchExecuteStatementRequest> request = null;
Response<BatchExecuteStatementResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchExecuteStatementRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(batchExecuteStatementRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchExecuteStatement");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<BatchExecuteStatementResult>> responseHandler = protocolFactory
.createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new BatchExecuteStatementResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
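/*
 * Illustrative sketch of calling batchExecuteStatement with PartiQL (not part of the generated client). The
 * "Music" table, its key attributes, and the statement text are assumptions.
 *
 *   BatchStatementRequest read = new BatchStatementRequest()
 *           .withStatement("SELECT * FROM \"Music\" WHERE Artist = ? AND SongTitle = ?")
 *           .withParameters(new AttributeValue().withS("Acme Band"), new AttributeValue().withS("Happy Day"));
 *   BatchExecuteStatementResult result = client.batchExecuteStatement(
 *           new BatchExecuteStatementRequest().withStatements(read));
 *   // Each BatchStatementResponse corresponds to one statement, in request order.
 *   result.getResponses().forEach(r -> System.out.println(r.getItem()));
 */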
/**
*
* The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify
* requested items by primary key.
*
* A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem
* returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded,
* or an internal processing failure occurs. If a partial result is returned, the operation returns a value for
* UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.
*
* If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items
* requested for the BatchGetItem call."
*
* For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52
* items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get
* the next page of results. If desired, your application can include its own logic to assemble the pages of
* results into one dataset.
*
* If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the
* request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is
* successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items
* in UnprocessedKeys.
*
* If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we
* strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately,
* the underlying read or write requests can still fail due to throttling on the individual tables. If you delay
* the batch operation using exponential backoff, the individual requests in the batch are much more likely to
* succeed.
*
* For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
*
* By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want
* strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.
*
* In order to minimize response latency, BatchGetItem retrieves items in parallel.
*
* When designing your application, keep in mind that DynamoDB does not return items in any particular order. To
* help parse the response by item, include the primary key values for the items in your request in the
* ProjectionExpression parameter.
*
* If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the
* minimum read capacity units according to the type of read. For more information, see Working with Tables in the
* Amazon DynamoDB Developer Guide.
*
* @param batchGetItemRequest
* Represents the input of a BatchGetItem operation.
* @return Result of the BatchGetItem operation returned by the service.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and
* Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request
* a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.BatchGetItem
* @see AWS API Documentation
*/
@Override
public BatchGetItemResult batchGetItem(BatchGetItemRequest request) {
request = beforeClientExecution(request);
return executeBatchGetItem(request);
}
@SdkInternalApi
final BatchGetItemResult executeBatchGetItem(BatchGetItemRequest batchGetItemRequest) {
ExecutionContext executionContext = createExecutionContext(batchGetItemRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchGetItemRequest> request = null;
Response<BatchGetItemResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchGetItemRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(batchGetItemRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchGetItem");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<BatchGetItemResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new BatchGetItemResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public BatchGetItemResult batchGetItem(java.util.Map<String, KeysAndAttributes> requestItems, String returnConsumedCapacity) {
return batchGetItem(new BatchGetItemRequest().withRequestItems(requestItems).withReturnConsumedCapacity(returnConsumedCapacity));
}
@Override
public BatchGetItemResult batchGetItem(java.util.Map<String, KeysAndAttributes> requestItems) {
return batchGetItem(new BatchGetItemRequest().withRequestItems(requestItems));
}
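/*
 * Illustrative sketch of the convenience overload above (not part of the generated client). The "Music" table and
 * its key attributes are assumptions, and UnprocessedKeys handling is deliberately simplified.
 *
 *   Map<String, AttributeValue> key = new HashMap<>();
 *   key.put("Artist", new AttributeValue().withS("Acme Band"));
 *   key.put("SongTitle", new AttributeValue().withS("Happy Day"));
 *   Map<String, KeysAndAttributes> requestItems = new HashMap<>();
 *   requestItems.put("Music", new KeysAndAttributes().withKeys(key).withConsistentRead(true));
 *   BatchGetItemResult result = client.batchGetItem(requestItems);
 *   List<Map<String, AttributeValue>> items = result.getResponses().get("Music");
 *   // Any keys reported in result.getUnprocessedKeys() should be retried with exponential backoff.
 */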
/**
*
* The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to
* BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests.
* Individual items to be written can be as large as 400 KB.
*
* BatchWriteItem cannot update items. To update items, use the UpdateItem action.
*
* The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem
* as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an
* internal processing failure occurs, the failed operations are returned in the UnprocessedItems response
* parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a
* loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those
* unprocessed items until all items have been processed.
*
* If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the
* request, then BatchWriteItem returns a ProvisionedThroughputExceededException.
*
* If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we
* strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately,
* the underlying read or write requests can still fail due to throttling on the individual tables. If you delay
* the batch operation using exponential backoff, the individual requests in the batch are much more likely to
* succeed. A retry sketch is shown after this method's overloads below.
*
* For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
*
* With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy
* data from another database into DynamoDB. In order to improve performance with these large-scale operations,
* BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example,
* you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted
* items in the response.
*
* If you use a programming language that supports concurrency, you can use threads to write items in parallel.
* Your application must include the necessary logic to manage the threads. With languages that don't support
* threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem
* performs the specified put and delete operations in parallel, giving you the power of the thread pool approach
* without having to introduce complexity into your application.
*
* Parallel processing reduces latency, but each specified put and delete request consumes the same number of write
* capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one
* write capacity unit.
*
* If one or more of the following is true, DynamoDB rejects the entire batch write operation:
*
* - One or more tables specified in the BatchWriteItem request does not exist.
*
* - Primary key attributes specified on an item in the request do not match those in the corresponding table's
* primary key schema.
*
* - You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you
* cannot put and delete the same item in the same BatchWriteItem request.
*
* - Your request contains at least two items with identical hash and range keys (which essentially is two put
* operations).
*
* - There are more than 25 requests in the batch.
*
* - Any individual item in a batch exceeds 400 KB.
*
* - The total request size exceeds 16 MB.
*
* @param batchWriteItemRequest
* Represents the input of a BatchWriteItem operation.
* @return Result of the BatchWriteItem operation returned by the service.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and
* Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws ItemCollectionSizeLimitExceededException
* An item collection is too large. This exception is only returned for tables that have one or more local
* secondary indexes.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request
* a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.BatchWriteItem
* @see AWS API Documentation
*/
@Override
public BatchWriteItemResult batchWriteItem(BatchWriteItemRequest request) {
request = beforeClientExecution(request);
return executeBatchWriteItem(request);
}
@SdkInternalApi
final BatchWriteItemResult executeBatchWriteItem(BatchWriteItemRequest batchWriteItemRequest) {
ExecutionContext executionContext = createExecutionContext(batchWriteItemRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<BatchWriteItemRequest> request = null;
Response<BatchWriteItemResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchWriteItemRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(batchWriteItemRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchWriteItem");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<BatchWriteItemResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new BatchWriteItemResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public BatchWriteItemResult batchWriteItem(java.util.Map<String, java.util.List<WriteRequest>> requestItems) {
return batchWriteItem(new BatchWriteItemRequest().withRequestItems(requestItems));
}
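/*
 * Illustrative sketch of retrying UnprocessedItems as the documentation above recommends (not part of the
 * generated client). The "Music" table, item attributes, and backoffMillis value are assumptions; real code should
 * use jittered exponential backoff and handle InterruptedException from Thread.sleep.
 *
 *   Map<String, AttributeValue> item = new HashMap<>();
 *   item.put("Artist", new AttributeValue().withS("Acme Band"));
 *   item.put("SongTitle", new AttributeValue().withS("Happy Day"));
 *   Map<String, List<WriteRequest>> requestItems = new HashMap<>();
 *   requestItems.put("Music", Arrays.asList(new WriteRequest(new PutRequest().withItem(item))));
 *   BatchWriteItemResult result = client.batchWriteItem(requestItems);
 *   while (!result.getUnprocessedItems().isEmpty()) {
 *       Thread.sleep(backoffMillis);  // hypothetical delay; grow it on each retry
 *       result = client.batchWriteItem(result.getUnprocessedItems());
 *   }
 */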
/**
*
* Creates a backup for an existing table.
*
*
* Each time you create an on-demand backup, the entire table data is backed up. There is no limit to the number of
* on-demand backups that can be taken.
*
*
* When you create an on-demand backup, a time marker of the request is cataloged, and the backup is created
* asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup
* requests are processed instantaneously and become available for restore within minutes.
*
*
* You can call CreateBackup at a maximum rate of 50 times per second.
*
* All backups in DynamoDB work without consuming any provisioned throughput on the table.
*
* If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed
* to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup might contain data
* modifications made between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency.
*
* Along with data, the following are also included on the backups:
*
* - Global secondary indexes (GSIs)
*
* - Local secondary indexes (LSIs)
*
* - Streams
*
* - Provisioned read and write capacity
*
* @param createBackupRequest
* @return Result of the CreateBackup operation returned by the service.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's account.
* @throws TableInUseException
* A target table with the specified name is either being created or deleted.
* @throws ContinuousBackupsUnavailableException
* Backups have not yet been enabled for this table.
* @throws BackupInUseException
* There is another ongoing conflicting backup control plane operation on the table. The backup is either
* being created, deleted or restored to a table.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include CreateTable,
* UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.CreateBackup
* @see AWS API Documentation
*/
@Override
public CreateBackupResult createBackup(CreateBackupRequest request) {
request = beforeClientExecution(request);
return executeCreateBackup(request);
}
@SdkInternalApi
final CreateBackupResult executeCreateBackup(CreateBackupRequest createBackupRequest) {
ExecutionContext executionContext = createExecutionContext(createBackupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateBackupRequest> request = null;
Response<CreateBackupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateBackupRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createBackupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateBackup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateBackupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateBackupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
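/*
 * Illustrative sketch of creating and inspecting an on-demand backup (not part of the generated client). The table
 * and backup names are assumptions.
 *
 *   CreateBackupResult backup = client.createBackup(new CreateBackupRequest()
 *           .withTableName("Music")
 *           .withBackupName("Music-backup-2021-01-01"));
 *   String backupArn = backup.getBackupDetails().getBackupArn();
 *   System.out.println("Backup " + backupArn + " status: " + backup.getBackupDetails().getBackupStatus());
 */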
/**
*
* Creates a global table from an existing table. A global table creates a replication relationship between two or
* more DynamoDB tables with the same table name in the provided Regions.
*
*
*
* This operation only applies to Version 2017.11.29 of global tables.
*
*
*
* If you want to add a new replica table to a global table, each of the following conditions must be true:
*
* - The table must have the same primary key as all of the other replicas.
*
* - The table must have the same name as all of the other replicas.
*
* - The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of
* the item.
*
* - None of the replica tables in the global table can contain any data.
*
* If global secondary indexes are specified, then the following conditions must also be met:
*
* - The global secondary indexes must have the same name.
*
* - The global secondary indexes must have the same hash key and sort key (if present).
*
* If local secondary indexes are specified, then the following conditions must also be met:
*
* - The local secondary indexes must have the same name.
*
* - The local secondary indexes must have the same hash key and sort key (if present).
*
* Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB
* strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables
* replicas and indexes.
*
*
* If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity
* units to your replica tables. You should also provision equal replicated write capacity units to matching
* secondary indexes across your global table.
*
*
*
* @param createGlobalTableRequest
* @return Result of the CreateGlobalTable operation returned by the service.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include CreateTable,
* UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws GlobalTableAlreadyExistsException
* The specified global table already exists.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's account.
* @sample AmazonDynamoDB.CreateGlobalTable
* @see AWS API Documentation
*/
@Override
public CreateGlobalTableResult createGlobalTable(CreateGlobalTableRequest request) {
request = beforeClientExecution(request);
return executeCreateGlobalTable(request);
}
@SdkInternalApi
final CreateGlobalTableResult executeCreateGlobalTable(CreateGlobalTableRequest createGlobalTableRequest) {
ExecutionContext executionContext = createExecutionContext(createGlobalTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateGlobalTableRequest> request = null;
Response<CreateGlobalTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateGlobalTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createGlobalTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateGlobalTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateGlobalTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateGlobalTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
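/*
 * Illustrative sketch of creating a Version 2017.11.29 global table from two existing, empty, stream-enabled
 * replica tables (not part of the generated client). The table name and Regions are assumptions.
 *
 *   CreateGlobalTableResult result = client.createGlobalTable(new CreateGlobalTableRequest()
 *           .withGlobalTableName("Music")
 *           .withReplicationGroup(new Replica().withRegionName("us-east-1"),
 *                                 new Replica().withRegionName("us-west-2")));
 *   System.out.println(result.getGlobalTableDescription().getGlobalTableStatus());
 */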
/**
*
* The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within
* each Region. That is, you can have two tables with same name if you create the tables in different Regions.
*
* CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a
* response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE.
* You can perform read and write operations only on an ACTIVE table.
*
* You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want
* to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one
* table with secondary indexes can be in the CREATING state at any given time.
*
* You can use the DescribeTable action to check the table status.
*
* @param createTableRequest
* Represents the input of a CreateTable operation.
* @return Result of the CreateTable operation returned by the service.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include CreateTable,
* UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.CreateTable
* @see AWS API Documentation
*/
@Override
public CreateTableResult createTable(CreateTableRequest request) {
request = beforeClientExecution(request);
return executeCreateTable(request);
}
@SdkInternalApi
final CreateTableResult executeCreateTable(CreateTableRequest createTableRequest) {
ExecutionContext executionContext = createExecutionContext(createTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateTableRequest> request = null;
Response<CreateTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public CreateTableResult createTable(java.util.List<AttributeDefinition> attributeDefinitions, String tableName,
java.util.List<KeySchemaElement> keySchema, ProvisionedThroughput provisionedThroughput) {
return createTable(new CreateTableRequest().withAttributeDefinitions(attributeDefinitions).withTableName(tableName).withKeySchema(keySchema)
.withProvisionedThroughput(provisionedThroughput));
}
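/*
 * Illustrative sketch of the convenience overload above (not part of the generated client). The "Music" table, its
 * key schema, and the throughput values are assumptions. CreateTable is asynchronous, so poll DescribeTable (or
 * use the SDK waiters) until the table is ACTIVE before writing to it.
 *
 *   CreateTableResult created = client.createTable(
 *           Arrays.asList(new AttributeDefinition("Artist", ScalarAttributeType.S),
 *                         new AttributeDefinition("SongTitle", ScalarAttributeType.S)),
 *           "Music",
 *           Arrays.asList(new KeySchemaElement("Artist", KeyType.HASH),
 *                         new KeySchemaElement("SongTitle", KeyType.RANGE)),
 *           new ProvisionedThroughput(5L, 5L));
 *   System.out.println(created.getTableDescription().getTableStatus());
 */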
/**
*
* Deletes an existing backup of a table.
*
*
* You can call DeleteBackup at a maximum rate of 10 times per second.
*
*
* @param deleteBackupRequest
* @return Result of the DeleteBackup operation returned by the service.
* @throws BackupNotFoundException
* Backup not found for the given BackupARN.
* @throws BackupInUseException
* There is another ongoing conflicting backup control plane operation on the table. The backup is either
* being created, deleted or restored to a table.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include CreateTable,
* UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DeleteBackup
* @see AWS API Documentation
*/
@Override
public DeleteBackupResult deleteBackup(DeleteBackupRequest request) {
request = beforeClientExecution(request);
return executeDeleteBackup(request);
}
@SdkInternalApi
final DeleteBackupResult executeDeleteBackup(DeleteBackupRequest deleteBackupRequest) {
ExecutionContext executionContext = createExecutionContext(deleteBackupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteBackupRequest> request = null;
Response<DeleteBackupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteBackupRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteBackupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteBackup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteBackupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteBackupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the
* item if it exists, or if it has an expected attribute value.
*
*
* In addition to deleting an item, you can also return the item's attribute values in the same operation, using
* the ReturnValues parameter.
*
* Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same
* item or attribute does not result in an error response.
*
*
* Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are
* met, DynamoDB performs the delete. Otherwise, the item is not deleted.
*
*
* @param deleteItemRequest
* Represents the input of a DeleteItem operation.
* @return Result of the DeleteItem operation returned by the service.
* @throws ConditionalCheckFailedException
* A condition specified in the operation could not be evaluated.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws ItemCollectionSizeLimitExceededException
* An item collection is too large. This exception is only returned for tables that have one or more local
* secondary indexes.
* @throws TransactionConflictException
* Operation was rejected because there is an ongoing transaction for the item.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DeleteItem
* @see AWS API Documentation
*/
@Override
public DeleteItemResult deleteItem(DeleteItemRequest request) {
request = beforeClientExecution(request);
return executeDeleteItem(request);
}
@SdkInternalApi
final DeleteItemResult executeDeleteItem(DeleteItemRequest deleteItemRequest) {
ExecutionContext executionContext = createExecutionContext(deleteItemRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteItemRequest> request = null;
Response<DeleteItemResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteItemRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteItemRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteItem");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteItemResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteItemResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DeleteItemResult deleteItem(String tableName, java.util.Map<String, AttributeValue> key) {
return deleteItem(new DeleteItemRequest().withTableName(tableName).withKey(key));
}
@Override
public DeleteItemResult deleteItem(String tableName, java.util.Map<String, AttributeValue> key, String returnValues) {
return deleteItem(new DeleteItemRequest().withTableName(tableName).withKey(key).withReturnValues(returnValues));
}
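    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): conditionally delete a single
     * item by its primary key. The table name "Music", the key attribute "Artist", and the attribute used in the
     * condition expression are hypothetical.
     */
    private static void deleteItemUsageSketch(AmazonDynamoDB client) {
        Map<String, AttributeValue> key = new HashMap<String, AttributeValue>();
        key.put("Artist", new AttributeValue("Acme Band"));
        // Delete only if the item is not locked; ReturnValues=ALL_OLD echoes the deleted attributes back.
        DeleteItemResult result = client.deleteItem(new DeleteItemRequest().withTableName("Music").withKey(key)
                .withConditionExpression("attribute_not_exists(LockedBy)").withReturnValues(ReturnValue.ALL_OLD));
        System.out.println("Deleted attributes: " + result.getAttributes());
    }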
/**
*
* The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified
* table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you
* can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a
* ResourceInUseException. If the specified table does not exist, DynamoDB returns a
* ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.
*
*
*
* DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in
* the DELETING state until the table deletion is complete.
*
*
*
* When you delete a table, any indexes on that table are also deleted.
*
*
* If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the
* DISABLED state, and the stream is automatically deleted after 24 hours.
*
*
* Use the DescribeTable action to check the status of the table.
*
*
* @param deleteTableRequest
* Represents the input of a DeleteTable operation.
* @return Result of the DeleteTable operation returned by the service.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DeleteTable
* @see AWS API
* Documentation
*/
@Override
public DeleteTableResult deleteTable(DeleteTableRequest request) {
request = beforeClientExecution(request);
return executeDeleteTable(request);
}
@SdkInternalApi
final DeleteTableResult executeDeleteTable(DeleteTableRequest deleteTableRequest) {
ExecutionContext executionContext = createExecutionContext(deleteTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteTableRequest> request = null;
Response<DeleteTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DeleteTableResult deleteTable(String tableName) {
return deleteTable(new DeleteTableRequest().withTableName(tableName));
}
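    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): delete a table and poll until
     * the deletion finishes. The table name "Music" is hypothetical; a production caller would bound the loop.
     */
    private static void deleteTableUsageSketch(AmazonDynamoDB client) throws InterruptedException {
        client.deleteTable(new DeleteTableRequest().withTableName("Music"));
        while (true) {
            try {
                // DescribeTable reports the DELETING status until the table is gone...
                System.out.println("Table status: " + client.describeTable("Music").getTable().getTableStatus());
                Thread.sleep(5000);
            } catch (ResourceNotFoundException e) {
                // ...at which point it throws ResourceNotFoundException.
                break;
            }
        }
    }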
/**
*
* Describes an existing backup of a table.
*
*
* You can call DescribeBackup at a maximum rate of 10 times per second.
*
*
* @param describeBackupRequest
* @return Result of the DescribeBackup operation returned by the service.
* @throws BackupNotFoundException
* Backup not found for the given BackupARN.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeBackup
* @see AWS API
* Documentation
*/
@Override
public DescribeBackupResult describeBackup(DescribeBackupRequest request) {
request = beforeClientExecution(request);
return executeDescribeBackup(request);
}
@SdkInternalApi
final DescribeBackupResult executeDescribeBackup(DescribeBackupRequest describeBackupRequest) {
ExecutionContext executionContext = createExecutionContext(describeBackupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeBackupRequest> request = null;
Response<DescribeBackupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeBackupRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeBackupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeBackup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeBackupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeBackupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
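    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): inspect an on-demand backup.
     * The backup ARN is supplied by the caller.
     */
    private static void describeBackupUsageSketch(AmazonDynamoDB client, String backupArn) {
        BackupDetails details = client.describeBackup(new DescribeBackupRequest().withBackupArn(backupArn))
                .getBackupDescription().getBackupDetails();
        // BackupStatus is CREATING, DELETED, or AVAILABLE.
        System.out.println(details.getBackupName() + " -> " + details.getBackupStatus());
    }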
/**
*
* Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups
* are ENABLED on all tables at table creation. If point in time recovery is enabled,
* PointInTimeRecoveryStatus will be set to ENABLED.
*
*
* After continuous backups and point in time recovery are enabled, you can restore to any point in time within
* EarliestRestorableDateTime and LatestRestorableDateTime.
*
*
* LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table
* to any point in time during the last 35 days.
*
*
* You can call DescribeContinuousBackups at a maximum rate of 10 times per second.
*
*
* @param describeContinuousBackupsRequest
* @return Result of the DescribeContinuousBackups operation returned by the service.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's account.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeContinuousBackups
* @see AWS API Documentation
*/
@Override
public DescribeContinuousBackupsResult describeContinuousBackups(DescribeContinuousBackupsRequest request) {
request = beforeClientExecution(request);
return executeDescribeContinuousBackups(request);
}
@SdkInternalApi
final DescribeContinuousBackupsResult executeDescribeContinuousBackups(DescribeContinuousBackupsRequest describeContinuousBackupsRequest) {
ExecutionContext executionContext = createExecutionContext(describeContinuousBackupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeContinuousBackupsRequest> request = null;
Response<DescribeContinuousBackupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeContinuousBackupsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeContinuousBackupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeContinuousBackups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeContinuousBackupsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeContinuousBackupsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
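    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): check whether point in time
     * recovery is enabled and, if so, print the restorable window. The table name "Music" is hypothetical.
     */
    private static void describeContinuousBackupsUsageSketch(AmazonDynamoDB client) {
        ContinuousBackupsDescription backups = client.describeContinuousBackups(
                new DescribeContinuousBackupsRequest().withTableName("Music")).getContinuousBackupsDescription();
        PointInTimeRecoveryDescription pitr = backups.getPointInTimeRecoveryDescription();
        if (pitr != null && "ENABLED".equals(pitr.getPointInTimeRecoveryStatus())) {
            System.out.println("Restorable between " + pitr.getEarliestRestorableDateTime() + " and "
                    + pitr.getLatestRestorableDateTime());
        } else {
            System.out.println("Point in time recovery is not enabled for this table.");
        }
    }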
/**
*
* Returns information about contributor insights, for a given table or global secondary index.
*
*
* @param describeContributorInsightsRequest
* @return Result of the DescribeContributorInsights operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeContributorInsights
* @see AWS API Documentation
*/
@Override
public DescribeContributorInsightsResult describeContributorInsights(DescribeContributorInsightsRequest request) {
request = beforeClientExecution(request);
return executeDescribeContributorInsights(request);
}
@SdkInternalApi
final DescribeContributorInsightsResult executeDescribeContributorInsights(DescribeContributorInsightsRequest describeContributorInsightsRequest) {
ExecutionContext executionContext = createExecutionContext(describeContributorInsightsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeContributorInsightsRequest> request = null;
Response<DescribeContributorInsightsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeContributorInsightsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeContributorInsightsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeContributorInsights");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeContributorInsightsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeContributorInsightsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns the regional endpoint information.
*
*
* @param describeEndpointsRequest
* @return Result of the DescribeEndpoints operation returned by the service.
* @sample AmazonDynamoDB.DescribeEndpoints
* @see AWS API
* Documentation
*/
@Override
public DescribeEndpointsResult describeEndpoints(DescribeEndpointsRequest request) {
request = beforeClientExecution(request);
return executeDescribeEndpoints(request);
}
@SdkInternalApi
final DescribeEndpointsResult executeDescribeEndpoints(DescribeEndpointsRequest describeEndpointsRequest) {
ExecutionContext executionContext = createExecutionContext(describeEndpointsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeEndpointsRequest> request = null;
Response<DescribeEndpointsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeEndpointsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeEndpointsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeEndpoints");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeEndpointsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeEndpointsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Describes an existing table export.
*
*
* @param describeExportRequest
* @return Result of the DescribeExport operation returned by the service.
* @throws ExportNotFoundException
* The specified export was not found.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeExport
* @see AWS API
* Documentation
*/
@Override
public DescribeExportResult describeExport(DescribeExportRequest request) {
request = beforeClientExecution(request);
return executeDescribeExport(request);
}
@SdkInternalApi
final DescribeExportResult executeDescribeExport(DescribeExportRequest describeExportRequest) {
ExecutionContext executionContext = createExecutionContext(describeExportRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeExportRequest> request = null;
Response<DescribeExportResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeExportRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeExportRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeExport");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeExportResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeExportResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns information about the specified global table.
*
*
*
* This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version
* 2019.11.21, you can use DescribeTable instead.
*
*
*
* @param describeGlobalTableRequest
* @return Result of the DescribeGlobalTable operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws GlobalTableNotFoundException
* The specified global table does not exist.
* @sample AmazonDynamoDB.DescribeGlobalTable
* @see AWS
* API Documentation
*/
@Override
public DescribeGlobalTableResult describeGlobalTable(DescribeGlobalTableRequest request) {
request = beforeClientExecution(request);
return executeDescribeGlobalTable(request);
}
@SdkInternalApi
final DescribeGlobalTableResult executeDescribeGlobalTable(DescribeGlobalTableRequest describeGlobalTableRequest) {
ExecutionContext executionContext = createExecutionContext(describeGlobalTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeGlobalTableRequest> request = null;
Response<DescribeGlobalTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeGlobalTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeGlobalTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeGlobalTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeGlobalTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeGlobalTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
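    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): list the replica Regions of a
     * version 2017.11.29 global table. The global table name "Music" is hypothetical.
     */
    private static void describeGlobalTableUsageSketch(AmazonDynamoDB client) {
        GlobalTableDescription globalTable = client.describeGlobalTable(
                new DescribeGlobalTableRequest().withGlobalTableName("Music")).getGlobalTableDescription();
        for (ReplicaDescription replica : globalTable.getReplicas()) {
            System.out.println("Replica region: " + replica.getRegionName());
        }
    }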
/**
*
* Describes Region-specific settings for a global table.
*
*
*
* This operation only applies to Version 2017.11.29 of global tables.
*
*
*
* @param describeGlobalTableSettingsRequest
* @return Result of the DescribeGlobalTableSettings operation returned by the service.
* @throws GlobalTableNotFoundException
* The specified global table does not exist.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeGlobalTableSettings
* @see AWS API Documentation
*/
@Override
public DescribeGlobalTableSettingsResult describeGlobalTableSettings(DescribeGlobalTableSettingsRequest request) {
request = beforeClientExecution(request);
return executeDescribeGlobalTableSettings(request);
}
@SdkInternalApi
final DescribeGlobalTableSettingsResult executeDescribeGlobalTableSettings(DescribeGlobalTableSettingsRequest describeGlobalTableSettingsRequest) {
ExecutionContext executionContext = createExecutionContext(describeGlobalTableSettingsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeGlobalTableSettingsRequest> request = null;
Response<DescribeGlobalTableSettingsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeGlobalTableSettingsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeGlobalTableSettingsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeGlobalTableSettings");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeGlobalTableSettingsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeGlobalTableSettingsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns information about the status of Kinesis streaming.
*
*
* @param describeKinesisStreamingDestinationRequest
* @return Result of the DescribeKinesisStreamingDestination operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeKinesisStreamingDestination
* @see AWS API Documentation
*/
@Override
public DescribeKinesisStreamingDestinationResult describeKinesisStreamingDestination(DescribeKinesisStreamingDestinationRequest request) {
request = beforeClientExecution(request);
return executeDescribeKinesisStreamingDestination(request);
}
@SdkInternalApi
final DescribeKinesisStreamingDestinationResult executeDescribeKinesisStreamingDestination(
DescribeKinesisStreamingDestinationRequest describeKinesisStreamingDestinationRequest) {
ExecutionContext executionContext = createExecutionContext(describeKinesisStreamingDestinationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeKinesisStreamingDestinationRequest> request = null;
Response<DescribeKinesisStreamingDestinationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeKinesisStreamingDestinationRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeKinesisStreamingDestinationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeKinesisStreamingDestination");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeKinesisStreamingDestinationResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeKinesisStreamingDestinationResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns the current provisioned-capacity quotas for your AWS account in a Region, both for the Region as a whole
* and for any one DynamoDB table that you create there.
*
*
* When you establish an AWS account, the account has initial quotas on the maximum read capacity units and write
* capacity units that you can provision across all of your DynamoDB tables in a given Region. Also, there are
* per-table quotas that apply when you create a table there. For more information, see Service, Account, and Table
* Quotas page in the Amazon DynamoDB Developer Guide.
*
*
* Although you can increase these quotas by filing a case at AWS Support Center, obtaining the increase is not
* instantaneous. The DescribeLimits action lets you write code to compare the capacity you are
* currently using to those quotas imposed by your account so that you have enough time to apply for an increase
* before you hit a quota.
*
*
* For example, you could use one of the AWS SDKs to do the following (a sketch of this flow appears after the method below):
*
*
* -
*
* Call DescribeLimits for a particular Region to obtain your current account quotas on provisioned
* capacity there.
*
*
* -
*
* Create a variable to hold the aggregate read capacity units provisioned for all your tables in that Region, and
* one to hold the aggregate write capacity units. Zero them both.
*
*
* -
*
* Call ListTables to obtain a list of all your DynamoDB tables.
*
*
* -
*
* For each table name listed by ListTables, do the following:
*
*
* -
*
* Call DescribeTable with the table name.
*
*
* -
*
* Use the data returned by DescribeTable to add the read capacity units and write capacity units
* provisioned for the table itself to your variables.
*
*
* -
*
* If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned
* capacity values to your variables as well.
*
*
*
*
* -
*
* Report the account quotas for that Region returned by DescribeLimits, along with the total current
* provisioned capacity levels you have calculated.
*
*
*
*
* This will let you see whether you are getting close to your account-level quotas.
*
*
* The per-table quotas apply only when you are creating a new table. They restrict the sum of the provisioned
* capacity of the new table itself and all its global secondary indexes.
*
*
* For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned capacity extremely rapidly, but
* the only quota that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot
* exceed either of the per-account quotas.
*
*
*
* DescribeLimits should only be called periodically. You can expect throttling errors if you call it
* more than once in a minute.
*
*
*
* The DescribeLimits Request element has no content.
*
*
* @param describeLimitsRequest
* Represents the input of a DescribeLimits operation. Has no content.
* @return Result of the DescribeLimits operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeLimits
* @see AWS API
* Documentation
*/
@Override
public DescribeLimitsResult describeLimits(DescribeLimitsRequest request) {
request = beforeClientExecution(request);
return executeDescribeLimits(request);
}
@SdkInternalApi
final DescribeLimitsResult executeDescribeLimits(DescribeLimitsRequest describeLimitsRequest) {
ExecutionContext executionContext = createExecutionContext(describeLimitsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeLimitsRequest> request = null;
Response<DescribeLimitsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeLimitsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeLimitsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeLimits");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeLimitsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeLimitsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
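    /**
     * Illustrative sketch added by the editor (not part of the generated client) of the flow described above: compare
     * the account-level provisioned-capacity quotas with the capacity currently provisioned across all tables and
     * their global secondary indexes. For brevity this uses only the first page of ListTables; a full implementation
     * would paginate.
     */
    private static void describeLimitsUsageSketch(AmazonDynamoDB client) {
        DescribeLimitsResult limits = client.describeLimits(new DescribeLimitsRequest());
        long readUnits = 0;
        long writeUnits = 0;
        for (String tableName : client.listTables().getTableNames()) {
            TableDescription table = client.describeTable(tableName).getTable();
            readUnits += table.getProvisionedThroughput().getReadCapacityUnits();
            writeUnits += table.getProvisionedThroughput().getWriteCapacityUnits();
            if (table.getGlobalSecondaryIndexes() != null) {
                for (GlobalSecondaryIndexDescription gsi : table.getGlobalSecondaryIndexes()) {
                    readUnits += gsi.getProvisionedThroughput().getReadCapacityUnits();
                    writeUnits += gsi.getProvisionedThroughput().getWriteCapacityUnits();
                }
            }
        }
        System.out.println("Provisioned reads: " + readUnits + " of " + limits.getAccountMaxReadCapacityUnits());
        System.out.println("Provisioned writes: " + writeUnits + " of " + limits.getAccountMaxWriteCapacityUnits());
    }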
/**
*
* Returns information about the table, including the current status of the table, when it was created, the primary
* key schema, and any indexes on the table.
*
*
*
* If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a
* ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata
* for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable
* request again.
*
*
*
* @param describeTableRequest
* Represents the input of a DescribeTable operation.
* @return Result of the DescribeTable operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeTable
* @see AWS API
* Documentation
*/
@Override
public DescribeTableResult describeTable(DescribeTableRequest request) {
request = beforeClientExecution(request);
return executeDescribeTable(request);
}
@SdkInternalApi
final DescribeTableResult executeDescribeTable(DescribeTableRequest describeTableRequest) {
ExecutionContext executionContext = createExecutionContext(describeTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTableRequest> request = null;
Response<DescribeTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public DescribeTableResult describeTable(String tableName) {
return describeTable(new DescribeTableRequest().withTableName(tableName));
}
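    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): print a table's status and key
     * schema. As noted above, DescribeTable issued right after CreateTable can throw ResourceNotFoundException, so
     * this sketch retries a few times. The table name "Music" is hypothetical.
     */
    private static void describeTableUsageSketch(AmazonDynamoDB client) throws InterruptedException {
        for (int attempt = 0; attempt < 5; attempt++) {
            try {
                TableDescription table = client.describeTable("Music").getTable();
                System.out.println("Status: " + table.getTableStatus() + ", key schema: " + table.getKeySchema());
                return;
            } catch (ResourceNotFoundException e) {
                Thread.sleep(2000); // table metadata may not be visible yet; wait and retry
            }
        }
    }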
/**
*
* Describes auto scaling settings across replicas of the global table at once.
*
*
*
* This operation only applies to Version 2019.11.21 of global tables.
*
*
*
* @param describeTableReplicaAutoScalingRequest
* @return Result of the DescribeTableReplicaAutoScaling operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeTableReplicaAutoScaling
* @see AWS API Documentation
*/
@Override
public DescribeTableReplicaAutoScalingResult describeTableReplicaAutoScaling(DescribeTableReplicaAutoScalingRequest request) {
request = beforeClientExecution(request);
return executeDescribeTableReplicaAutoScaling(request);
}
@SdkInternalApi
final DescribeTableReplicaAutoScalingResult executeDescribeTableReplicaAutoScaling(
DescribeTableReplicaAutoScalingRequest describeTableReplicaAutoScalingRequest) {
ExecutionContext executionContext = createExecutionContext(describeTableReplicaAutoScalingRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTableReplicaAutoScalingRequest> request = null;
Response<DescribeTableReplicaAutoScalingResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTableReplicaAutoScalingRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeTableReplicaAutoScalingRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTableReplicaAutoScaling");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeTableReplicaAutoScalingResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeTableReplicaAutoScalingResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Gives a description of the Time to Live (TTL) status on the specified table.
*
*
* @param describeTimeToLiveRequest
* @return Result of the DescribeTimeToLive operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.DescribeTimeToLive
* @see AWS
* API Documentation
*/
@Override
public DescribeTimeToLiveResult describeTimeToLive(DescribeTimeToLiveRequest request) {
request = beforeClientExecution(request);
return executeDescribeTimeToLive(request);
}
@SdkInternalApi
final DescribeTimeToLiveResult executeDescribeTimeToLive(DescribeTimeToLiveRequest describeTimeToLiveRequest) {
ExecutionContext executionContext = createExecutionContext(describeTimeToLiveRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTimeToLiveRequest> request = null;
Response<DescribeTimeToLiveResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTimeToLiveRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeTimeToLiveRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTimeToLive");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DescribeTimeToLiveResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeTimeToLiveResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
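    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): read a table's TTL
     * configuration. The table name "Music" is hypothetical.
     */
    private static void describeTimeToLiveUsageSketch(AmazonDynamoDB client) {
        TimeToLiveDescription ttl = client.describeTimeToLive(new DescribeTimeToLiveRequest().withTableName("Music"))
                .getTimeToLiveDescription();
        // Status is ENABLING, DISABLING, ENABLED, or DISABLED; the attribute name is set only when TTL is configured.
        System.out.println("TTL status: " + ttl.getTimeToLiveStatus() + ", attribute: " + ttl.getAttributeName());
    }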
/**
*
* Stops replication from the DynamoDB table to the Kinesis data stream. This is done without deleting either of the
* resources.
*
*
* @param disableKinesisStreamingDestinationRequest
* @return Result of the DisableKinesisStreamingDestination operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @sample AmazonDynamoDB.DisableKinesisStreamingDestination
* @see AWS API Documentation
*/
@Override
public DisableKinesisStreamingDestinationResult disableKinesisStreamingDestination(DisableKinesisStreamingDestinationRequest request) {
request = beforeClientExecution(request);
return executeDisableKinesisStreamingDestination(request);
}
@SdkInternalApi
final DisableKinesisStreamingDestinationResult executeDisableKinesisStreamingDestination(
DisableKinesisStreamingDestinationRequest disableKinesisStreamingDestinationRequest) {
ExecutionContext executionContext = createExecutionContext(disableKinesisStreamingDestinationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DisableKinesisStreamingDestinationRequest> request = null;
Response<DisableKinesisStreamingDestinationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DisableKinesisStreamingDestinationRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(disableKinesisStreamingDestinationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DisableKinesisStreamingDestination");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<DisableKinesisStreamingDestinationResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DisableKinesisStreamingDestinationResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Starts table data replication to the specified Kinesis data stream at a timestamp chosen during the enable
* workflow. If this operation doesn't return results immediately, use DescribeKinesisStreamingDestination to check
* if streaming to the Kinesis data stream is ACTIVE.
*
*
* @param enableKinesisStreamingDestinationRequest
* @return Result of the EnableKinesisStreamingDestination operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @sample AmazonDynamoDB.EnableKinesisStreamingDestination
* @see AWS API Documentation
*/
@Override
public EnableKinesisStreamingDestinationResult enableKinesisStreamingDestination(EnableKinesisStreamingDestinationRequest request) {
request = beforeClientExecution(request);
return executeEnableKinesisStreamingDestination(request);
}
@SdkInternalApi
final EnableKinesisStreamingDestinationResult executeEnableKinesisStreamingDestination(
EnableKinesisStreamingDestinationRequest enableKinesisStreamingDestinationRequest) {
ExecutionContext executionContext = createExecutionContext(enableKinesisStreamingDestinationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<EnableKinesisStreamingDestinationRequest> request = null;
Response<EnableKinesisStreamingDestinationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new EnableKinesisStreamingDestinationRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(enableKinesisStreamingDestinationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "EnableKinesisStreamingDestination");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<EnableKinesisStreamingDestinationResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new EnableKinesisStreamingDestinationResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
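    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): enable replication to a
     * Kinesis data stream and poll DescribeKinesisStreamingDestination until the destination reports ACTIVE. The table
     * name "Music" is hypothetical; the stream ARN is supplied by the caller.
     */
    private static void enableKinesisStreamingUsageSketch(AmazonDynamoDB client, String streamArn) throws InterruptedException {
        client.enableKinesisStreamingDestination(
                new EnableKinesisStreamingDestinationRequest().withTableName("Music").withStreamArn(streamArn));
        String status = "ENABLING";
        while (!"ACTIVE".equals(status)) {
            Thread.sleep(5000);
            status = client.describeKinesisStreamingDestination(
                    new DescribeKinesisStreamingDestinationRequest().withTableName("Music"))
                    .getKinesisDataStreamDestinations().get(0).getDestinationStatus();
        }
    }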
/**
*
* This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL.
*
*
* @param executeStatementRequest
* @return Result of the ExecuteStatement operation returned by the service.
* @throws ConditionalCheckFailedException
* A condition specified in the operation could not be evaluated.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws ItemCollectionSizeLimitExceededException
* An item collection is too large. This exception is only returned for tables that have one or more local
* secondary indexes.
* @throws TransactionConflictException
* Operation was rejected because there is an ongoing transaction for the item.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws DuplicateItemException
* There was an attempt to insert an item with the same primary key as an item that already exists in the
* DynamoDB table.
* @sample AmazonDynamoDB.ExecuteStatement
* @see AWS API
* Documentation
*/
@Override
public ExecuteStatementResult executeStatement(ExecuteStatementRequest request) {
request = beforeClientExecution(request);
return executeExecuteStatement(request);
}
@SdkInternalApi
final ExecuteStatementResult executeExecuteStatement(ExecuteStatementRequest executeStatementRequest) {
ExecutionContext executionContext = createExecutionContext(executeStatementRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ExecuteStatementRequest> request = null;
Response<ExecuteStatementResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExecuteStatementRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(executeStatementRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExecuteStatement");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<ExecuteStatementResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ExecuteStatementResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
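    /**
     * Illustrative usage sketch added by the editor (not part of the generated client): a parameterized PartiQL read
     * followed by a singleton PartiQL write. The table "Music" and its attributes are hypothetical.
     */
    private static void executeStatementUsageSketch(AmazonDynamoDB client) {
        // Each "?" in the statement is bound to the corresponding entry in the parameter list.
        ExecuteStatementResult result = client.executeStatement(new ExecuteStatementRequest()
                .withStatement("SELECT * FROM Music WHERE Artist = ?")
                .withParameters(new AttributeValue("Acme Band")));
        for (Map<String, AttributeValue> item : result.getItems()) {
            System.out.println(item);
        }
        // A singleton write expressed in PartiQL.
        client.executeStatement(new ExecuteStatementRequest()
                .withStatement("INSERT INTO Music VALUE {'Artist' : 'Acme Band', 'SongTitle' : 'Happy Day'}"));
    }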
/**
*
* This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL.
*
*
* @param executeTransactionRequest
* @return Result of the ExecuteTransaction operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws TransactionCanceledException
* The entire transaction request was canceled.
*
* DynamoDB cancels a TransactWriteItems request under the following circumstances:
*
*
* -
*
* A condition in one of the condition expressions is not met.
*
*
* -
*
* A table in the TransactWriteItems request is in a different account or region.
*
*
* -
*
* More than one action in the TransactWriteItems operation targets the same item.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large,
* or a similar validation error occurs because of changes made by the transaction.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
* DynamoDB cancels a TransactGetItems request under the following circumstances:
*
*
* -
*
* There is an ongoing TransactGetItems operation that conflicts with a concurrent
* PutItem, UpdateItem, DeleteItem or TransactWriteItems request. In this case the
* TransactGetItems operation fails with a TransactionCanceledException.
*
*
* -
*
* A table in the TransactGetItems request is in a different account or region.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
*
* If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property. This
* property is not set for other languages. Transaction cancellation reasons are ordered in the order of
* requested items; if an item has no error, it will have NONE code and Null message.
*
*
*
* Cancellation reason codes and possible error messages:
*
*
* -
*
* No Errors:
*
*
* -
*
* Code: NONE
*
*
* -
*
* Message: null
*
*
*
*
* -
*
* Conditional Check Failed:
*
*
* -
*
* Code: ConditionalCheckFailed
*
*
* -
*
* Message: The conditional request failed.
*
*
*
*
* -
*
* Item Collection Size Limit Exceeded:
*
*
* -
*
* Code: ItemCollectionSizeLimitExceeded
*
*
* -
*
* Message: Collection size exceeded.
*
*
*
*
* -
*
* Transaction Conflict:
*
*
* -
*
* Code: TransactionConflict
*
*
* -
*
* Message: Transaction is ongoing for the item.
*
*
*
*
* -
*
* Provisioned Throughput Exceeded:
*
*
* -
*
* Code: ProvisionedThroughputExceeded
*
*
* -
*
* Messages:
*
*
* -
*
* The level of configured provisioned throughput for the table was exceeded. Consider increasing your
* provisioning level with the UpdateTable API.
*
*
*
* This message is returned when provisioned throughput is exceeded on a provisioned DynamoDB table.
*
*
* -
*
* The level of configured provisioned throughput for one or more global secondary indexes of the table was
* exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes
* with the UpdateTable API.
*
*
*
* This message is returned when provisioned throughput is exceeded on a provisioned GSI.
*
*
*
*
*
*
* -
*
* Throttling Error:
*
*
* -
*
* Code: ThrottlingError
*
*
* -
*
* Messages:
*
*
* -
*
* Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your
* table or index so please try again shortly. If exceptions persist, check if you have a hot key:
* https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
*
*
*
* This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically
* scaling the table.
*
*
* -
*
* Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is
* automatically scaling your index so please try again shortly.
*
*
*
* This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically
* scaling the GSI.
*
*
*
*
*
*
* -
*
* Validation Error:
*
*
* -
*
* Code: ValidationError
*
*
* -
*
* Messages:
*
*
* -
*
* One or more parameter values were invalid.
*
*
* -
*
* The update expression attempted to update the secondary index key beyond allowed size limits.
*
*
* -
*
* The update expression attempted to update the secondary index key to unsupported type.
*
*
* -
*
* An operand in the update expression has an incorrect data type.
*
*
* -
*
* Item size to update has exceeded the maximum allowed size.
*
*
* -
*
* Number overflow. Attempting to store a number with magnitude larger than supported range.
*
*
* -
*
* Type mismatch for attribute to update.
*
*
* -
*
* Nesting Levels have exceeded supported limits.
*
*
* -
*
* The document path provided in the update expression is invalid for update.
*
*
* -
*
* The provided expression refers to an attribute that does not exist in the item.
*
*
*
*
*
*
* @throws TransactionInProgressException
* The transaction with the given request token is already in progress.
* @throws IdempotentParameterMismatchException
* DynamoDB rejected the request because you retried a request with a different payload but with an
* idempotent token that was already used.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ExecuteTransaction
* @see AWS
* API Documentation
*/
@Override
public ExecuteTransactionResult executeTransaction(ExecuteTransactionRequest request) {
request = beforeClientExecution(request);
return executeExecuteTransaction(request);
}
@SdkInternalApi
final ExecuteTransactionResult executeExecuteTransaction(ExecuteTransactionRequest executeTransactionRequest) {
ExecutionContext executionContext = createExecutionContext(executeTransactionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ExecuteTransactionRequest> request = null;
Response<ExecuteTransactionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExecuteTransactionRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(executeTransactionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExecuteTransaction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<ExecuteTransactionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ExecuteTransactionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Exports table data to an S3 bucket. The table must have point in time recovery enabled, and you can export data
* from any time within the point in time recovery window.
*
*
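* A minimal invocation sketch (the table ARN, bucket name, and export time below are illustrative
* assumptions, not values taken from this file; client is an AmazonDynamoDB built elsewhere):
*
*   ExportTableToPointInTimeResult export = client.exportTableToPointInTime(
*           new ExportTableToPointInTimeRequest()
*                   .withTableArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music")
*                   .withS3Bucket("my-export-bucket")
*                   .withExportFormat(ExportFormat.DYNAMODB_JSON)
*                   .withExportTime(new java.util.Date(System.currentTimeMillis() - 3600000L)));
*   System.out.println(export.getExportDescription().getExportArn());
*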
* @param exportTableToPointInTimeRequest
* @return Result of the ExportTableToPointInTime operation returned by the service.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's account.
* @throws PointInTimeRecoveryUnavailableException
* Point in time recovery has not yet been enabled for this source table.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InvalidExportTimeException
* The specified ExportTime is outside of the point in time recovery window.
* @throws ExportConflictException
* There was a conflict when writing to the specified S3 bucket.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ExportTableToPointInTime
* @see AWS API Documentation
*/
@Override
public ExportTableToPointInTimeResult exportTableToPointInTime(ExportTableToPointInTimeRequest request) {
request = beforeClientExecution(request);
return executeExportTableToPointInTime(request);
}
@SdkInternalApi
final ExportTableToPointInTimeResult executeExportTableToPointInTime(ExportTableToPointInTimeRequest exportTableToPointInTimeRequest) {
ExecutionContext executionContext = createExecutionContext(exportTableToPointInTimeRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ExportTableToPointInTimeRequest> request = null;
Response<ExportTableToPointInTimeResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ExportTableToPointInTimeRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(exportTableToPointInTimeRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ExportTableToPointInTime");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<ExportTableToPointInTimeResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new ExportTableToPointInTimeResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* The GetItem operation returns a set of attributes for the item with the given primary key. If there
* is no matching item, GetItem does not return any data and there will be no Item element
* in the response.
*
*
* GetItem provides an eventually consistent read by default. If your application requires a strongly
* consistent read, set ConsistentRead to true. Although a strongly consistent read might
* take more time than an eventually consistent read, it always returns the last updated value.
*
*
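* A minimal usage sketch (the table name "Music" and key attribute "Artist" are illustrative
* assumptions, not values taken from this file):
*
*   AmazonDynamoDB client = AmazonDynamoDBClientBuilder.standard().build();
*   java.util.Map<String, AttributeValue> key = new java.util.HashMap<>();
*   key.put("Artist", new AttributeValue().withS("No One You Know"));
*   GetItemResult result = client.getItem(new GetItemRequest()
*           .withTableName("Music")
*           .withKey(key)
*           .withConsistentRead(true));   // request a strongly consistent read
*   java.util.Map<String, AttributeValue> item = result.getItem();
*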
* @param getItemRequest
* Represents the input of a GetItem operation.
* @return Result of the GetItem operation returned by the service.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.GetItem
* @see AWS API
* Documentation
*/
@Override
public GetItemResult getItem(GetItemRequest request) {
request = beforeClientExecution(request);
return executeGetItem(request);
}
@SdkInternalApi
final GetItemResult executeGetItem(GetItemRequest getItemRequest) {
ExecutionContext executionContext = createExecutionContext(getItemRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<GetItemRequest> request = null;
Response<GetItemResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new GetItemRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(getItemRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetItem");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<GetItemResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new GetItemResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public GetItemResult getItem(String tableName, java.util.Map<String, AttributeValue> key) {
return getItem(new GetItemRequest().withTableName(tableName).withKey(key));
}
@Override
public GetItemResult getItem(String tableName, java.util.Map<String, AttributeValue> key, Boolean consistentRead) {
return getItem(new GetItemRequest().withTableName(tableName).withKey(key).withConsistentRead(consistentRead));
}
/**
*
* List backups associated with an AWS account. To list backups for a given table, specify TableName.
* ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can
* also specify a maximum number of entries to be returned in a page.
*
*
* In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time
* at which the original backup was requested.
*
*
* You can call ListBackups a maximum of five times per second.
*
*
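* A minimal usage sketch (the table name and page size are illustrative assumptions; client is an
* AmazonDynamoDB built elsewhere):
*
*   ListBackupsResult backups = client.listBackups(new ListBackupsRequest()
*           .withTableName("Music")
*           .withLimit(10));
*   for (BackupSummary summary : backups.getBackupSummaries()) {
*       System.out.println(summary.getBackupArn());
*   }
*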
* @param listBackupsRequest
* @return Result of the ListBackups operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ListBackups
* @see AWS API
* Documentation
*/
@Override
public ListBackupsResult listBackups(ListBackupsRequest request) {
request = beforeClientExecution(request);
return executeListBackups(request);
}
@SdkInternalApi
final ListBackupsResult executeListBackups(ListBackupsRequest listBackupsRequest) {
ExecutionContext executionContext = createExecutionContext(listBackupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListBackupsRequest> request = null;
Response<ListBackupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListBackupsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listBackupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListBackups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<ListBackupsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListBackupsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns a list of ContributorInsightsSummary for a table and all its global secondary indexes.
*
*
* @param listContributorInsightsRequest
* @return Result of the ListContributorInsights operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ListContributorInsights
* @see AWS API Documentation
*/
@Override
public ListContributorInsightsResult listContributorInsights(ListContributorInsightsRequest request) {
request = beforeClientExecution(request);
return executeListContributorInsights(request);
}
@SdkInternalApi
final ListContributorInsightsResult executeListContributorInsights(ListContributorInsightsRequest listContributorInsightsRequest) {
ExecutionContext executionContext = createExecutionContext(listContributorInsightsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListContributorInsightsRequest> request = null;
Response<ListContributorInsightsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListContributorInsightsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(listContributorInsightsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListContributorInsights");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<ListContributorInsightsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new ListContributorInsightsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Lists completed exports within the past 90 days.
*
*
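* A minimal usage sketch (the table ARN is an illustrative assumption; client is an AmazonDynamoDB
* built elsewhere):
*
*   ListExportsResult exports = client.listExports(new ListExportsRequest()
*           .withTableArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music")
*           .withMaxResults(25));
*   for (ExportSummary summary : exports.getExportSummaries()) {
*       System.out.println(summary.getExportArn() + " " + summary.getExportStatus());
*   }
*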
* @param listExportsRequest
* @return Result of the ListExports operation returned by the service.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ListExports
* @see AWS API
* Documentation
*/
@Override
public ListExportsResult listExports(ListExportsRequest request) {
request = beforeClientExecution(request);
return executeListExports(request);
}
@SdkInternalApi
final ListExportsResult executeListExports(ListExportsRequest listExportsRequest) {
ExecutionContext executionContext = createExecutionContext(listExportsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListExportsRequest> request = null;
Response<ListExportsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListExportsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listExportsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListExports");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<ListExportsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListExportsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Lists all global tables that have a replica in the specified Region.
*
*
*
* This operation only applies to Version
* 2017.11.29 of global tables.
*
*
*
* @param listGlobalTablesRequest
* @return Result of the ListGlobalTables operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ListGlobalTables
* @see AWS API
* Documentation
*/
@Override
public ListGlobalTablesResult listGlobalTables(ListGlobalTablesRequest request) {
request = beforeClientExecution(request);
return executeListGlobalTables(request);
}
@SdkInternalApi
final ListGlobalTablesResult executeListGlobalTables(ListGlobalTablesRequest listGlobalTablesRequest) {
ExecutionContext executionContext = createExecutionContext(listGlobalTablesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListGlobalTablesRequest> request = null;
Response<ListGlobalTablesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListGlobalTablesRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listGlobalTablesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListGlobalTables");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<ListGlobalTablesResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListGlobalTablesResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns an array of table names associated with the current account and endpoint. The output from
* ListTables is paginated, with each page returning a maximum of 100 table names.
*
*
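* A pagination sketch (illustrative only; client is an AmazonDynamoDB built elsewhere):
*
*   String startTable = null;
*   do {
*       ListTablesResult page = client.listTables(new ListTablesRequest()
*               .withExclusiveStartTableName(startTable)
*               .withLimit(100));
*       page.getTableNames().forEach(System.out::println);
*       startTable = page.getLastEvaluatedTableName();   // null when there are no more pages
*   } while (startTable != null);
*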
* @param listTablesRequest
* Represents the input of a ListTables operation.
* @return Result of the ListTables operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ListTables
* @see AWS API
* Documentation
*/
@Override
public ListTablesResult listTables(ListTablesRequest request) {
request = beforeClientExecution(request);
return executeListTables(request);
}
@SdkInternalApi
final ListTablesResult executeListTables(ListTablesRequest listTablesRequest) {
ExecutionContext executionContext = createExecutionContext(listTablesRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListTablesRequest> request = null;
Response<ListTablesResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListTablesRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listTablesRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListTables");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<ListTablesResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListTablesResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public ListTablesResult listTables() {
return listTables(new ListTablesRequest());
}
@Override
public ListTablesResult listTables(String exclusiveStartTableName) {
return listTables(new ListTablesRequest().withExclusiveStartTableName(exclusiveStartTableName));
}
@Override
public ListTablesResult listTables(String exclusiveStartTableName, Integer limit) {
return listTables(new ListTablesRequest().withExclusiveStartTableName(exclusiveStartTableName).withLimit(limit));
}
@Override
public ListTablesResult listTables(Integer limit) {
return listTables(new ListTablesRequest().withLimit(limit));
}
/**
*
* List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per
* account.
*
*
* For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in
* the Amazon DynamoDB Developer Guide.
*
*
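* A minimal usage sketch (the resource ARN is an illustrative assumption; client is an AmazonDynamoDB
* built elsewhere):
*
*   ListTagsOfResourceResult tags = client.listTagsOfResource(new ListTagsOfResourceRequest()
*           .withResourceArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music"));
*   for (Tag tag : tags.getTags()) {
*       System.out.println(tag.getKey() + "=" + tag.getValue());
*   }
*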
* @param listTagsOfResourceRequest
* @return Result of the ListTagsOfResource operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.ListTagsOfResource
* @see AWS
* API Documentation
*/
@Override
public ListTagsOfResourceResult listTagsOfResource(ListTagsOfResourceRequest request) {
request = beforeClientExecution(request);
return executeListTagsOfResource(request);
}
@SdkInternalApi
final ListTagsOfResourceResult executeListTagsOfResource(ListTagsOfResourceRequest listTagsOfResourceRequest) {
ExecutionContext executionContext = createExecutionContext(listTagsOfResourceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListTagsOfResourceRequest> request = null;
Response<ListTagsOfResourceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListTagsOfResourceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listTagsOfResourceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListTagsOfResource");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<ListTagsOfResourceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListTagsOfResourceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new
* item already exists in the specified table, the new item completely replaces the existing item. You can perform a
* conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an
* existing item if it has certain attribute values. You can return the item's attribute values in the same
* operation, using the ReturnValues parameter.
*
*
*
* This topic provides general information about the PutItem API.
*
*
* For information on how to call the PutItem API using the AWS SDK in specific languages, see the
* corresponding AWS SDK documentation.
*
*
* When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be
* null.
*
*
* Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a
* length greater than zero if the attribute is used as a key attribute for a table or index. Set type attributes
* cannot be empty.
*
*
* Invalid Requests with empty values will be rejected with a ValidationException exception.
*
*
*
* To prevent a new item from replacing an existing item, use a conditional expression that contains the
* attribute_not_exists function with the name of the attribute being used as the partition key for the
* table. Since every record must contain that attribute, the attribute_not_exists function will only
* succeed if no matching item exists.
*
*
*
* For more information about PutItem, see Working with
* Items in the Amazon DynamoDB Developer Guide.
*
*
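* A conditional-put sketch using attribute_not_exists (the table name and attribute names are
* illustrative assumptions; client is an AmazonDynamoDB built elsewhere):
*
*   java.util.Map<String, AttributeValue> item = new java.util.HashMap<>();
*   item.put("Artist", new AttributeValue().withS("No One You Know"));
*   item.put("AlbumTitle", new AttributeValue().withS("Somewhat Famous"));
*   client.putItem(new PutItemRequest()
*           .withTableName("Music")
*           .withItem(item)
*           .withConditionExpression("attribute_not_exists(Artist)"));   // reject if the item already exists
*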
* @param putItemRequest
* Represents the input of a PutItem operation.
* @return Result of the PutItem operation returned by the service.
* @throws ConditionalCheckFailedException
* A condition specified in the operation could not be evaluated.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws ItemCollectionSizeLimitExceededException
* An item collection is too large. This exception is only returned for tables that have one or more local
* secondary indexes.
* @throws TransactionConflictException
* Operation was rejected because there is an ongoing transaction for the item.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.PutItem
* @see AWS API
* Documentation
*/
@Override
public PutItemResult putItem(PutItemRequest request) {
request = beforeClientExecution(request);
return executePutItem(request);
}
@SdkInternalApi
final PutItemResult executePutItem(PutItemRequest putItemRequest) {
ExecutionContext executionContext = createExecutionContext(putItemRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<PutItemRequest> request = null;
Response<PutItemResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new PutItemRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(putItemRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutItem");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<PutItemResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new PutItemResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public PutItemResult putItem(String tableName, java.util.Map<String, AttributeValue> item) {
return putItem(new PutItemRequest().withTableName(tableName).withItem(item));
}
@Override
public PutItemResult putItem(String tableName, java.util.Map<String, AttributeValue> item, String returnValues) {
return putItem(new PutItemRequest().withTableName(tableName).withItem(item).withReturnValues(returnValues));
}
/**
*
* The Query operation finds items based on primary key values. You can query any table or secondary
* index that has a composite primary key (a partition key and a sort key).
*
*
* Use the KeyConditionExpression parameter to provide a specific value for the partition key. The
* Query operation will return all of the items from the table or index with that partition key value.
* You can optionally narrow the scope of the Query operation by specifying a sort key value and a
* comparison operator in KeyConditionExpression. To further refine the Query results, you
* can optionally provide a FilterExpression. A FilterExpression determines which items
* within the results should be returned to you. All of the other results are discarded.
*
*
* A Query operation always returns a result set. If no matching items are found, the result set will
* be empty. Queries that do not return results consume the minimum number of read capacity units for that type of
* read operation.
*
*
*
* DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that
* is returned to an application. The number of capacity units consumed will be the same whether you request all of
* the attributes (the default behavior) or just some of them (using a projection expression). The number will also
* be the same whether or not you use a FilterExpression.
*
*
*
* Query results are always sorted by the sort key value. If the data type of the sort key is Number,
* the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By
* default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to
* false.
*
*
* A single Query operation will read up to the maximum number of items set (if using the
* Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using
* FilterExpression. If LastEvaluatedKey is present in the response, you will need to
* paginate the result set. For more information, see Paginating
* the Results in the Amazon DynamoDB Developer Guide.
*
*
* FilterExpression is applied after a Query finishes, but before the results are
* returned. A FilterExpression cannot contain partition key or sort key attributes. You need to
* specify those attributes in the KeyConditionExpression.
*
*
*
* A Query operation can return an empty result set and a LastEvaluatedKey if all the
* items read for the page of results are filtered out.
*
*
*
* You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local
* secondary index, you can set the ConsistentRead parameter to true and obtain a strongly
* consistent result. Global secondary indexes support eventually consistent reads only, so do not specify
* ConsistentRead when querying a global secondary index.
*
*
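* A minimal usage sketch (the table name, key attribute, and placeholder value are illustrative
* assumptions; client is an AmazonDynamoDB built elsewhere):
*
*   java.util.Map<String, AttributeValue> values = new java.util.HashMap<>();
*   values.put(":artist", new AttributeValue().withS("No One You Know"));
*   QueryResult result = client.query(new QueryRequest()
*           .withTableName("Music")
*           .withKeyConditionExpression("Artist = :artist")
*           .withExpressionAttributeValues(values)
*           .withScanIndexForward(false));   // return results in descending sort-key order
*   result.getItems().forEach(System.out::println);
*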
* @param queryRequest
* Represents the input of a Query operation.
* @return Result of the Query operation returned by the service.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.Query
* @see AWS API
* Documentation
*/
@Override
public QueryResult query(QueryRequest request) {
request = beforeClientExecution(request);
return executeQuery(request);
}
@SdkInternalApi
final QueryResult executeQuery(QueryRequest queryRequest) {
ExecutionContext executionContext = createExecutionContext(queryRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<QueryRequest> request = null;
Response<QueryResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new QueryRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(queryRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "Query");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<QueryResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new QueryResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any
* type of restore) in a given account.
*
*
* You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
*
*
* You must manually set up the following on the restored table:
*
*
* -
*
* Auto scaling policies
*
*
* -
*
* IAM policies
*
*
* -
*
* Amazon CloudWatch metrics and alarms
*
*
* -
*
* Tags
*
*
* -
*
* Stream settings
*
*
* -
*
* Time to Live (TTL) settings
*
*
*
*
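* A minimal invocation sketch (the backup ARN and target table name are illustrative assumptions;
* client is an AmazonDynamoDB built elsewhere):
*
*   client.restoreTableFromBackup(new RestoreTableFromBackupRequest()
*           .withBackupArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music/backup/01234567890123-abcdefgh")
*           .withTargetTableName("Music-restored"));
*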
* @param restoreTableFromBackupRequest
* @return Result of the RestoreTableFromBackup operation returned by the service.
* @throws TableAlreadyExistsException
* A target table with the specified name already exists.
* @throws TableInUseException
* A target table with the specified name is either being created or deleted.
* @throws BackupNotFoundException
* Backup not found for the given BackupARN.
* @throws BackupInUseException
* There is another ongoing conflicting backup control plane operation on the table. The backup is either
* being created, deleted or restored to a table.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.RestoreTableFromBackup
* @see AWS API Documentation
*/
@Override
public RestoreTableFromBackupResult restoreTableFromBackup(RestoreTableFromBackupRequest request) {
request = beforeClientExecution(request);
return executeRestoreTableFromBackup(request);
}
@SdkInternalApi
final RestoreTableFromBackupResult executeRestoreTableFromBackup(RestoreTableFromBackupRequest restoreTableFromBackupRequest) {
ExecutionContext executionContext = createExecutionContext(restoreTableFromBackupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<RestoreTableFromBackupRequest> request = null;
Response<RestoreTableFromBackupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new RestoreTableFromBackupRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(restoreTableFromBackupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "RestoreTableFromBackup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<RestoreTableFromBackupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new RestoreTableFromBackupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Restores the specified table to the specified point in time within EarliestRestorableDateTime and
* LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days.
* Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
*
*
* When you restore using point in time recovery, DynamoDB restores your table data to the state based on the
* selected date and time (day:hour:minute:second) to a new table.
*
*
* Along with data, the following are also included on the new restored table using point in time recovery:
*
*
* -
*
* Global secondary indexes (GSIs)
*
*
* -
*
* Local secondary indexes (LSIs)
*
*
* -
*
* Provisioned read and write capacity
*
*
* -
*
* Encryption settings
*
*
*
* All these settings come from the current settings of the source table at the time of restore.
*
*
*
*
* You must manually set up the following on the restored table:
*
*
* -
*
* Auto scaling policies
*
*
* -
*
* IAM policies
*
*
* -
*
* Amazon CloudWatch metrics and alarms
*
*
* -
*
* Tags
*
*
* -
*
* Stream settings
*
*
* -
*
* Time to Live (TTL) settings
*
*
* -
*
* Point in time recovery settings
*
*
*
*
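* A minimal invocation sketch (the table names and the choice of restore time are illustrative
* assumptions; client is an AmazonDynamoDB built elsewhere):
*
*   client.restoreTableToPointInTime(new RestoreTableToPointInTimeRequest()
*           .withSourceTableName("Music")
*           .withTargetTableName("Music-restored")
*           .withUseLatestRestorableTime(true));   // or .withRestoreDateTime(someDate)
*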
* @param restoreTableToPointInTimeRequest
* @return Result of the RestoreTableToPointInTime operation returned by the service.
* @throws TableAlreadyExistsException
* A target table with the specified name already exists.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's account.
* @throws TableInUseException
* A target table with the specified name is either being created or deleted.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InvalidRestoreTimeException
* An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime and
* LatestRestorableDateTime.
* @throws PointInTimeRecoveryUnavailableException
* Point in time recovery has not yet been enabled for this source table.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.RestoreTableToPointInTime
* @see AWS API Documentation
*/
@Override
public RestoreTableToPointInTimeResult restoreTableToPointInTime(RestoreTableToPointInTimeRequest request) {
request = beforeClientExecution(request);
return executeRestoreTableToPointInTime(request);
}
@SdkInternalApi
final RestoreTableToPointInTimeResult executeRestoreTableToPointInTime(RestoreTableToPointInTimeRequest restoreTableToPointInTimeRequest) {
ExecutionContext executionContext = createExecutionContext(restoreTableToPointInTimeRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<RestoreTableToPointInTimeRequest> request = null;
Response<RestoreTableToPointInTimeResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new RestoreTableToPointInTimeRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(restoreTableToPointInTimeRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "RestoreTableToPointInTime");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<RestoreTableToPointInTimeResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new RestoreTableToPointInTimeResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* The Scan operation returns one or more items and item attributes by accessing every item in a table
* or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression
* operation.
*
*
* If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results
* are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation.
* The results also include the number of items exceeding the limit. A scan can result in no table data meeting the
* filter criteria.
*
*
* A single Scan operation reads up to the maximum number of items set (if using the Limit
* parameter) or a maximum of 1 MB of data and then applies any filtering to the results using
* FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate
* the result set. For more information, see Paginating the
* Results in the Amazon DynamoDB Developer Guide.
*
*
* Scan operations proceed sequentially; however, for faster performance on a large table or secondary
* index, applications can request a parallel Scan operation by providing the Segment and
* TotalSegments parameters. For more information, see Parallel
* Scan in the Amazon DynamoDB Developer Guide.
*
*
* Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set
* might not include the changes to data in the table immediately before the operation began. If you need a
* consistent copy of the data, as of the time that the Scan begins, you can set the
* ConsistentRead parameter to true.
*
*
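* A parallel-scan sketch for one segment (the table name and segment numbers are illustrative
* assumptions; client is an AmazonDynamoDB built elsewhere):
*
*   ScanResult result = client.scan(new ScanRequest()
*           .withTableName("Music")
*           .withSegment(0)              // this worker's segment
*           .withTotalSegments(4));      // total number of parallel workers
*   result.getItems().forEach(System.out::println);
*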
* @param scanRequest
* Represents the input of a Scan operation.
* @return Result of the Scan operation returned by the service.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.Scan
* @see AWS API
* Documentation
*/
@Override
public ScanResult scan(ScanRequest request) {
request = beforeClientExecution(request);
return executeScan(request);
}
@SdkInternalApi
final ScanResult executeScan(ScanRequest scanRequest) {
ExecutionContext executionContext = createExecutionContext(scanRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ScanRequest> request = null;
Response<ScanResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ScanRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(scanRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "Scan");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<ScanResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new ScanResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public ScanResult scan(String tableName, java.util.List<String> attributesToGet) {
return scan(new ScanRequest().withTableName(tableName).withAttributesToGet(attributesToGet));
}
@Override
public ScanResult scan(String tableName, java.util.Map<String, Condition> scanFilter) {
return scan(new ScanRequest().withTableName(tableName).withScanFilter(scanFilter));
}
@Override
public ScanResult scan(String tableName, java.util.List<String> attributesToGet, java.util.Map<String, Condition> scanFilter) {
return scan(new ScanRequest().withTableName(tableName).withAttributesToGet(attributesToGet).withScanFilter(scanFilter));
}
/**
*
* Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that
* they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up
* to five times per second, per account.
*
*
* For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in
* the Amazon DynamoDB Developer Guide.
*
*
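* A minimal usage sketch (the resource ARN and tag values are illustrative assumptions; client is an
* AmazonDynamoDB built elsewhere):
*
*   client.tagResource(new TagResourceRequest()
*           .withResourceArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music")
*           .withTags(new Tag().withKey("CostCenter").withValue("research")));
*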
* @param tagResourceRequest
* @return Result of the TagResource operation returned by the service.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @sample AmazonDynamoDB.TagResource
* @see AWS API
* Documentation
*/
@Override
public TagResourceResult tagResource(TagResourceRequest request) {
request = beforeClientExecution(request);
return executeTagResource(request);
}
@SdkInternalApi
final TagResourceResult executeTagResource(TagResourceRequest tagResourceRequest) {
ExecutionContext executionContext = createExecutionContext(tagResourceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<TagResourceRequest> request = null;
Response<TagResourceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new TagResourceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(tagResourceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "TagResource");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<TagResourceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new TagResourceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* TransactGetItems is a synchronous operation that atomically retrieves multiple items from one or
* more tables (but not from indexes) in a single account and Region. A TransactGetItems call can
* contain up to 25 TransactGetItem objects, each of which contains a Get structure that
* specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems
* cannot retrieve items from tables in more than one AWS account or Region. The aggregate size of the items in the
* transaction cannot exceed 4 MB.
*
*
* DynamoDB rejects the entire TransactGetItems request if any of the following is true:
*
*
* -
*
* A conflicting operation is in the process of updating an item to be read.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
* -
*
* The aggregate size of the items in the transaction cannot exceed 4 MB.
*
*
*
*
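* A minimal usage sketch retrieving one item transactionally (the table name and key are illustrative
* assumptions; client is an AmazonDynamoDB built elsewhere):
*
*   java.util.Map<String, AttributeValue> key = new java.util.HashMap<>();
*   key.put("Artist", new AttributeValue().withS("No One You Know"));
*   TransactGetItemsResult result = client.transactGetItems(new TransactGetItemsRequest()
*           .withTransactItems(new TransactGetItem()
*                   .withGet(new Get().withTableName("Music").withKey(key))));
*   result.getResponses().forEach(r -> System.out.println(r.getItem()));
*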
* @param transactGetItemsRequest
* @return Result of the TransactGetItems operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws TransactionCanceledException
* The entire transaction request was canceled.
*
* DynamoDB cancels a TransactWriteItems request under the following circumstances:
*
*
* -
*
* A condition in one of the condition expressions is not met.
*
*
* -
*
* A table in the TransactWriteItems request is in a different account or region.
*
*
* -
*
* More than one action in the TransactWriteItems operation targets the same item.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large,
* or a similar validation error occurs because of changes made by the transaction.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
* DynamoDB cancels a TransactGetItems request under the following circumstances:
*
*
* -
*
* There is an ongoing TransactGetItems operation that conflicts with a concurrent
* PutItem, UpdateItem, DeleteItem or TransactWriteItems
* request. In this case the TransactGetItems operation fails with a
* TransactionCanceledException.
*
*
* -
*
* A table in the TransactGetItems request is in a different account or region.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
*
* If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property.
* This property is not set for other languages. Transaction cancellation reasons are ordered in the order
* of requested items; if an item has no error it will have the NONE code and a Null message.
*
*
*
* Cancellation reason codes and possible error messages:
*
*
* -
*
* No Errors:
*
*
* -
*
* Code: NONE
*
*
* -
*
* Message: null
*
*
*
*
* -
*
* Conditional Check Failed:
*
*
* -
*
* Code: ConditionalCheckFailed
*
*
* -
*
* Message: The conditional request failed.
*
*
*
*
* -
*
* Item Collection Size Limit Exceeded:
*
*
* -
*
* Code: ItemCollectionSizeLimitExceeded
*
*
* -
*
* Message: Collection size exceeded.
*
*
*
*
* -
*
* Transaction Conflict:
*
*
* -
*
* Code: TransactionConflict
*
*
* -
*
* Message: Transaction is ongoing for the item.
*
*
*
*
* -
*
* Provisioned Throughput Exceeded:
*
*
* -
*
* Code: ProvisionedThroughputExceeded
*
*
* -
*
* Messages:
*
*
* -
*
* The level of configured provisioned throughput for the table was exceeded. Consider increasing your
* provisioning level with the UpdateTable API.
*
*
*
* This message is received when provisioned throughput is exceeded on a provisioned DynamoDB table.
*
*
* -
*
* The level of configured provisioned throughput for one or more global secondary indexes of the table was
* exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes
* with the UpdateTable API.
*
*
*
* This message is returned when provisioned throughput is exceeded on a provisioned GSI.
*
*
*
*
*
*
* -
*
* Throttling Error:
*
*
* -
*
* Code: ThrottlingError
*
*
* -
*
* Messages:
*
*
* -
*
* Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your
* table or index so please try again shortly. If exceptions persist, check if you have a hot key:
* https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
*
*
*
* This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically
* scaling the table.
*
*
* -
*
* Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is
* automatically scaling your index so please try again shortly.
*
*
*
* This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically
* scaling the GSI.
*
*
*
*
*
*
* -
*
* Validation Error:
*
*
* -
*
* Code: ValidationError
*
*
* -
*
* Messages:
*
*
* -
*
* One or more parameter values were invalid.
*
*
* -
*
* The update expression attempted to update the secondary index key beyond allowed size limits.
*
*
* -
*
* The update expression attempted to update the secondary index key to unsupported type.
*
*
* -
*
* An operand in the update expression has an incorrect data type.
*
*
* -
*
* Item size to update has exceeded the maximum allowed size.
*
*
* -
*
* Number overflow. Attempting to store a number with magnitude larger than supported range.
*
*
* -
*
* Type mismatch for attribute to update.
*
*
* -
*
* Nesting Levels have exceeded supported limits.
*
*
* -
*
* The document path provided in the update expression is invalid for update.
*
*
* -
*
* The provided expression refers to an attribute that does not exist in the item.
*
*
*
*
*
*
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.TransactGetItems
* @see AWS API
* Documentation
*/
@Override
public TransactGetItemsResult transactGetItems(TransactGetItemsRequest request) {
request = beforeClientExecution(request);
return executeTransactGetItems(request);
}
@SdkInternalApi
final TransactGetItemsResult executeTransactGetItems(TransactGetItemsRequest transactGetItemsRequest) {
ExecutionContext executionContext = createExecutionContext(transactGetItemsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<TransactGetItemsRequest> request = null;
Response<TransactGetItemsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new TransactGetItemsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(transactGetItemsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "TransactGetItems");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<TransactGetItemsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new TransactGetItemsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
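/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): retrieves one item transactionally from a hypothetical "Music" table with
 * partition key "Artist" and sort key "SongTitle". The table name, key names, and values are assumptions.
 *
 * AmazonDynamoDB client = AmazonDynamoDBClientBuilder.standard().build();
 * Map<String, AttributeValue> key = new HashMap<>();
 * key.put("Artist", new AttributeValue("No One You Know"));
 * key.put("SongTitle", new AttributeValue("Call Me Today"));
 * TransactGetItemsResult result = client.transactGetItems(new TransactGetItemsRequest().withTransactItems(
 *         new TransactGetItem().withGet(new Get().withTableName("Music").withKey(key))));
 * List<ItemResponse> responses = result.getResponses();
 */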
/**
*
* TransactWriteItems is a synchronous write operation that groups up to 25 action requests. These
* actions can target items in different tables, but not in different AWS accounts or Regions, and no two actions
* can target the same item. For example, you cannot both ConditionCheck and Update the
* same item. The aggregate size of the items in the transaction cannot exceed 4 MB.
*
*
* The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by
* the following objects:
*
*
* -
*
* Put - Initiates a PutItem operation to write a new item. This structure
* specifies the primary key of the item to be written, the name of the table to write it in, an optional condition
* expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field
* indicating whether to retrieve the item's attributes if the condition is not met.
*
*
* -
*
* Update - Initiates an UpdateItem operation to update an existing item. This
* structure specifies the primary key of the item to be updated, the name of the table where it resides, an
* optional condition expression that must be satisfied for the update to succeed, an expression that defines one or
* more attributes to be updated, and a field indicating whether to retrieve the item's attributes if the condition
* is not met.
*
*
* -
*
* Delete - Initiates a DeleteItem operation to delete an existing item. This
* structure specifies the primary key of the item to be deleted, the name of the table where it resides, an
* optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether
* to retrieve the item's attributes if the condition is not met.
*
*
* -
*
* ConditionCheck - Applies a condition to an item that is not being modified by the
* transaction. This structure specifies the primary key of the item to be checked, the name of the table where it
* resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating
* whether to retrieve the item's attributes if the condition is not met.
*
*
*
*
* DynamoDB rejects the entire TransactWriteItems request if any of the following is true:
*
*
* -
*
* A condition in one of the condition expressions is not met.
*
*
* -
*
* An ongoing operation is in the process of updating the same item.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too large, or a
* similar validation error occurs because of changes made by the transaction.
*
*
* -
*
* The aggregate size of the items in the transaction exceeds 4 MB.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
* @param transactWriteItemsRequest
* @return Result of the TransactWriteItems operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws TransactionCanceledException
* The entire transaction request was canceled.
*
* DynamoDB cancels a TransactWriteItems request under the following circumstances:
*
*
* -
*
* A condition in one of the condition expressions is not met.
*
*
* -
*
* A table in the TransactWriteItems request is in a different account or region.
*
*
* -
*
* More than one action in the TransactWriteItems operation targets the same item.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large,
* or a similar validation error occurs because of changes made by the transaction.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
* DynamoDB cancels a TransactGetItems request under the following circumstances:
*
*
* -
*
* There is an ongoing TransactGetItems operation that conflicts with a concurrent
* PutItem, UpdateItem, DeleteItem or TransactWriteItems
* request. In this case the TransactGetItems operation fails with a
* TransactionCanceledException.
*
*
* -
*
* A table in the TransactGetItems request is in a different account or region.
*
*
* -
*
* There is insufficient provisioned capacity for the transaction to be completed.
*
*
* -
*
* There is a user error, such as an invalid data format.
*
*
*
*
*
* If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property.
* This property is not set for other languages. Transaction cancellation reasons are ordered in the order
* of requested items; if an item has no error it will have a NONE code and a Null message.
*
*
*
* Cancellation reason codes and possible error messages:
*
*
* -
*
* No Errors:
*
*
* -
*
* Code: NONE
*
*
* -
*
* Message: null
*
*
*
*
* -
*
* Conditional Check Failed:
*
*
* -
*
* Code: ConditionalCheckFailed
*
*
* -
*
* Message: The conditional request failed.
*
*
*
*
* -
*
* Item Collection Size Limit Exceeded:
*
*
* -
*
* Code: ItemCollectionSizeLimitExceeded
*
*
* -
*
* Message: Collection size exceeded.
*
*
*
*
* -
*
* Transaction Conflict:
*
*
* -
*
* Code: TransactionConflict
*
*
* -
*
* Message: Transaction is ongoing for the item.
*
*
*
*
* -
*
* Provisioned Throughput Exceeded:
*
*
* -
*
* Code: ProvisionedThroughputExceeded
*
*
* -
*
* Messages:
*
*
* -
*
* The level of configured provisioned throughput for the table was exceeded. Consider increasing your
* provisioning level with the UpdateTable API.
*
*
*
* This message is received when provisioned throughput is exceeded on a provisioned DynamoDB table.
*
*
* -
*
* The level of configured provisioned throughput for one or more global secondary indexes of the table was
* exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes
* with the UpdateTable API.
*
*
*
* This message is returned when provisioned throughput is exceeded on a provisioned GSI.
*
*
*
*
*
*
* -
*
* Throttling Error:
*
*
* -
*
* Code: ThrottlingError
*
*
* -
*
* Messages:
*
*
* -
*
* Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your
* table or index so please try again shortly. If exceptions persist, check if you have a hot key:
* https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
*
*
*
* This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically
* scaling the table.
*
*
* -
*
* Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is
* automatically scaling your index so please try again shortly.
*
*
*
* This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically
* scaling the GSI.
*
*
*
*
*
*
* -
*
* Validation Error:
*
*
* -
*
* Code: ValidationError
*
*
* -
*
* Messages:
*
*
* -
*
* One or more parameter values were invalid.
*
*
* -
*
* The update expression attempted to update the secondary index key beyond allowed size limits.
*
*
* -
*
* The update expression attempted to update the secondary index key to unsupported type.
*
*
* -
*
* An operand in the update expression has an incorrect data type.
*
*
* -
*
* Item size to update has exceeded the maximum allowed size.
*
*
* -
*
* Number overflow. Attempting to store a number with magnitude larger than supported range.
*
*
* -
*
* Type mismatch for attribute to update.
*
*
* -
*
* Nesting Levels have exceeded supported limits.
*
*
* -
*
* The document path provided in the update expression is invalid for update.
*
*
* -
*
* The provided expression refers to an attribute that does not exist in the item.
*
*
*
*
*
*
* @throws TransactionInProgressException
* The transaction with the given request token is already in progress.
* @throws IdempotentParameterMismatchException
* DynamoDB rejected the request because you retried a request with a different payload but with an
* idempotent token that was already used.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.TransactWriteItems
* @see AWS
* API Documentation
*/
@Override
public TransactWriteItemsResult transactWriteItems(TransactWriteItemsRequest request) {
request = beforeClientExecution(request);
return executeTransactWriteItems(request);
}
@SdkInternalApi
final TransactWriteItemsResult executeTransactWriteItems(TransactWriteItemsRequest transactWriteItemsRequest) {
ExecutionContext executionContext = createExecutionContext(transactWriteItemsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<TransactWriteItemsRequest> request = null;
Response<TransactWriteItemsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new TransactWriteItemsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(transactWriteItemsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "TransactWriteItems");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<TransactWriteItemsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new TransactWriteItemsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
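/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): groups a Put and a ConditionCheck in one transaction. The "Orders" table, its
 * "OrderId" key, and the condition expression are assumptions for illustration only.
 *
 * Map<String, AttributeValue> newItem = new HashMap<>();
 * newItem.put("OrderId", new AttributeValue("order-1001"));
 * newItem.put("Status", new AttributeValue("PENDING"));
 * Map<String, AttributeValue> checkKey = new HashMap<>();
 * checkKey.put("OrderId", new AttributeValue("order-1000"));
 * client.transactWriteItems(new TransactWriteItemsRequest().withTransactItems(
 *         new TransactWriteItem().withPut(new Put().withTableName("Orders").withItem(newItem)),
 *         new TransactWriteItem().withConditionCheck(new ConditionCheck().withTableName("Orders")
 *                 .withKey(checkKey).withConditionExpression("attribute_exists(OrderId)"))));
 */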
/**
*
* Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to
* five times per second, per account.
*
*
* For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in
* the Amazon DynamoDB Developer Guide.
*
*
* @param untagResourceRequest
* @return Result of the UntagResource operation returned by the service.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable,
* UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @sample AmazonDynamoDB.UntagResource
* @see AWS API
* Documentation
*/
@Override
public UntagResourceResult untagResource(UntagResourceRequest request) {
request = beforeClientExecution(request);
return executeUntagResource(request);
}
@SdkInternalApi
final UntagResourceResult executeUntagResource(UntagResourceRequest untagResourceRequest) {
ExecutionContext executionContext = createExecutionContext(untagResourceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UntagResourceRequest> request = null;
Response<UntagResourceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UntagResourceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(untagResourceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UntagResource");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UntagResourceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new UntagResourceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
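/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): removes a hypothetical "Environment" tag. The account ID and table ARN are
 * placeholders.
 *
 * client.untagResource(new UntagResourceRequest()
 *         .withResourceArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music")
 *         .withTagKeys("Environment"));
 */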
/**
*
* UpdateContinuousBackups enables or disables point in time recovery for the specified table. A
* successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription.
* Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled,
* PointInTimeRecoveryStatus will be set to ENABLED.
*
*
* Once continuous backups and point in time recovery are enabled, you can restore to any point in time within
* EarliestRestorableDateTime and LatestRestorableDateTime.
*
*
* LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table
* to any point in time during the last 35 days.
*
*
* @param updateContinuousBackupsRequest
* @return Result of the UpdateContinuousBackups operation returned by the service.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's
* account.
* @throws ContinuousBackupsUnavailableException
* Backups have not yet been enabled for this table.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateContinuousBackups
* @see AWS API Documentation
*/
@Override
public UpdateContinuousBackupsResult updateContinuousBackups(UpdateContinuousBackupsRequest request) {
request = beforeClientExecution(request);
return executeUpdateContinuousBackups(request);
}
@SdkInternalApi
final UpdateContinuousBackupsResult executeUpdateContinuousBackups(UpdateContinuousBackupsRequest updateContinuousBackupsRequest) {
ExecutionContext executionContext = createExecutionContext(updateContinuousBackupsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateContinuousBackupsRequest> request = null;
Response<UpdateContinuousBackupsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateContinuousBackupsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(updateContinuousBackupsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateContinuousBackups");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateContinuousBackupsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new UpdateContinuousBackupsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
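/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): enables point in time recovery on a hypothetical "Music" table and reads back
 * the returned description.
 *
 * ContinuousBackupsDescription description = client.updateContinuousBackups(new UpdateContinuousBackupsRequest()
 *         .withTableName("Music")
 *         .withPointInTimeRecoverySpecification(
 *                 new PointInTimeRecoverySpecification().withPointInTimeRecoveryEnabled(true)))
 *         .getContinuousBackupsDescription();
 */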
/**
*
* Updates the status for contributor insights for a specific table or index.
*
*
* @param updateContributorInsightsRequest
* @return Result of the UpdateContributorInsights operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateContributorInsights
* @see AWS API Documentation
*/
@Override
public UpdateContributorInsightsResult updateContributorInsights(UpdateContributorInsightsRequest request) {
request = beforeClientExecution(request);
return executeUpdateContributorInsights(request);
}
@SdkInternalApi
final UpdateContributorInsightsResult executeUpdateContributorInsights(UpdateContributorInsightsRequest updateContributorInsightsRequest) {
ExecutionContext executionContext = createExecutionContext(updateContributorInsightsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateContributorInsightsRequest> request = null;
Response<UpdateContributorInsightsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateContributorInsightsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(updateContributorInsightsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateContributorInsights");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateContributorInsightsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new UpdateContributorInsightsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
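/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): enables CloudWatch Contributor Insights on a hypothetical "Music" table.
 *
 * client.updateContributorInsights(new UpdateContributorInsightsRequest()
 *         .withTableName("Music")
 *         .withContributorInsightsAction(ContributorInsightsAction.ENABLE));
 */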
/**
*
* Adds or removes replicas in the specified global table. The global table must already exist to be able to use
* this operation. Any replica to be added must be empty, have the same name as the global table, have the same key
* schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.
*
*
*
* Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for
* simplicity we recommend that you issue separate requests for adding or removing replicas.
*
*
*
* If global secondary indexes are specified, then the following conditions must also be met:
*
*
* -
*
* The global secondary indexes must have the same name.
*
*
* -
*
* The global secondary indexes must have the same hash key and sort key (if present).
*
*
* -
*
* The global secondary indexes must have the same provisioned and maximum write capacity units.
*
*
*
*
* @param updateGlobalTableRequest
* @return Result of the UpdateGlobalTable operation returned by the service.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @throws GlobalTableNotFoundException
* The specified global table does not exist.
* @throws ReplicaAlreadyExistsException
* The specified replica is already part of the global table.
* @throws ReplicaNotFoundException
* The specified replica is no longer part of the global table.
* @throws TableNotFoundException
* A source table with the name TableName does not currently exist within the subscriber's
* account.
* @sample AmazonDynamoDB.UpdateGlobalTable
* @see AWS API
* Documentation
*/
@Override
public UpdateGlobalTableResult updateGlobalTable(UpdateGlobalTableRequest request) {
request = beforeClientExecution(request);
return executeUpdateGlobalTable(request);
}
@SdkInternalApi
final UpdateGlobalTableResult executeUpdateGlobalTable(UpdateGlobalTableRequest updateGlobalTableRequest) {
ExecutionContext executionContext = createExecutionContext(updateGlobalTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateGlobalTableRequest> request = null;
Response<UpdateGlobalTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateGlobalTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateGlobalTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateGlobalTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateGlobalTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new UpdateGlobalTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
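/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): adds a replica in eu-west-1 to a hypothetical 2017.11.29-version global table
 * named "Music". The table name and Region are assumptions.
 *
 * client.updateGlobalTable(new UpdateGlobalTableRequest()
 *         .withGlobalTableName("Music")
 *         .withReplicaUpdates(new ReplicaUpdate()
 *                 .withCreate(new CreateReplicaAction().withRegionName("eu-west-1"))));
 */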
/**
*
* Updates settings for a global table.
*
*
* @param updateGlobalTableSettingsRequest
* @return Result of the UpdateGlobalTableSettings operation returned by the service.
* @throws GlobalTableNotFoundException
* The specified global table does not exist.
* @throws ReplicaNotFoundException
* The specified replica is no longer part of the global table.
* @throws IndexNotFoundException
* The operation tried to access a nonexistent index.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable,
* UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateGlobalTableSettings
* @see AWS API Documentation
*/
@Override
public UpdateGlobalTableSettingsResult updateGlobalTableSettings(UpdateGlobalTableSettingsRequest request) {
request = beforeClientExecution(request);
return executeUpdateGlobalTableSettings(request);
}
@SdkInternalApi
final UpdateGlobalTableSettingsResult executeUpdateGlobalTableSettings(UpdateGlobalTableSettingsRequest updateGlobalTableSettingsRequest) {
ExecutionContext executionContext = createExecutionContext(updateGlobalTableSettingsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateGlobalTableSettingsRequest> request = null;
Response<UpdateGlobalTableSettingsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateGlobalTableSettingsRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(updateGlobalTableSettingsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateGlobalTableSettings");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateGlobalTableSettingsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new UpdateGlobalTableSettingsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
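/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): sets the provisioned write capacity of a hypothetical global table named
 * "Music"; the value 10 is arbitrary.
 *
 * client.updateGlobalTableSettings(new UpdateGlobalTableSettingsRequest()
 *         .withGlobalTableName("Music")
 *         .withGlobalTableProvisionedWriteCapacityUnits(10L));
 */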
/**
*
* Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put,
* delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new
* attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected
* attribute values).
*
*
* You can also return the item's attribute values in the same UpdateItem operation using the
* ReturnValues parameter.
*
*
* @param updateItemRequest
* Represents the input of an UpdateItem operation.
* @return Result of the UpdateItem operation returned by the service.
* @throws ConditionalCheckFailedException
* A condition specified in the operation could not be evaluated.
* @throws ProvisionedThroughputExceededException
* Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this
* exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce
* the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws ItemCollectionSizeLimitExceededException
* An item collection is too large. This exception is only returned for tables that have one or more local
* secondary indexes.
* @throws TransactionConflictException
* Operation was rejected because there is an ongoing transaction for the item.
* @throws RequestLimitExceededException
* Throughput exceeds the current throughput quota for your account. Please contact AWS Support to request a quota increase.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateItem
* @see AWS API
* Documentation
*/
@Override
public UpdateItemResult updateItem(UpdateItemRequest request) {
request = beforeClientExecution(request);
return executeUpdateItem(request);
}
@SdkInternalApi
final UpdateItemResult executeUpdateItem(UpdateItemRequest updateItemRequest) {
ExecutionContext executionContext = createExecutionContext(updateItemRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateItemRequest> request = null;
Response<UpdateItemResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateItemRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateItemRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateItem");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateItemResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new UpdateItemResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public UpdateItemResult updateItem(String tableName, java.util.Map<String, AttributeValue> key, java.util.Map<String, AttributeValueUpdate> attributeUpdates) {
return updateItem(new UpdateItemRequest().withTableName(tableName).withKey(key).withAttributeUpdates(attributeUpdates));
}
@Override
public UpdateItemResult updateItem(String tableName, java.util.Map<String, AttributeValue> key,
java.util.Map<String, AttributeValueUpdate> attributeUpdates, String returnValues) {
return updateItem(new UpdateItemRequest().withTableName(tableName).withKey(key).withAttributeUpdates(attributeUpdates).withReturnValues(returnValues));
}
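/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): an update with an update expression and ReturnValues. The "Music" table, its
 * key attributes, and the "AlbumTitle" attribute are assumptions for illustration only.
 *
 * Map<String, AttributeValue> key = new HashMap<>();
 * key.put("Artist", new AttributeValue("No One You Know"));
 * key.put("SongTitle", new AttributeValue("Call Me Today"));
 * Map<String, AttributeValue> values = new HashMap<>();
 * values.put(":t", new AttributeValue("Somewhat Famous"));
 * UpdateItemResult result = client.updateItem(new UpdateItemRequest()
 *         .withTableName("Music")
 *         .withKey(key)
 *         .withUpdateExpression("SET AlbumTitle = :t")
 *         .withExpressionAttributeValues(values)
 *         .withReturnValues(ReturnValue.UPDATED_NEW));
 */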
/**
*
* Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given
* table.
*
*
* You can only perform one of the following operations at once:
*
*
* -
*
* Modify the provisioned throughput settings of the table.
*
*
* -
*
* Enable or disable DynamoDB Streams on the table.
*
*
* -
*
* Remove a global secondary index from the table.
*
*
* -
*
* Create a new global secondary index on the table. After the index begins backfilling, you can use
* UpdateTable to perform other operations.
*
*
*
*
* UpdateTable is an asynchronous operation; while it is executing, the table status changes from
* ACTIVE to UPDATING. While it is UPDATING, you cannot issue another
* UpdateTable request. When the table returns to the ACTIVE state, the
* UpdateTable operation is complete.
*
*
* @param updateTableRequest
* Represents the input of an UpdateTable operation.
* @return Result of the UpdateTable operation returned by the service.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable,
* UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateTable
* @see AWS API
* Documentation
*/
@Override
public UpdateTableResult updateTable(UpdateTableRequest request) {
request = beforeClientExecution(request);
return executeUpdateTable(request);
}
@SdkInternalApi
final UpdateTableResult executeUpdateTable(UpdateTableRequest updateTableRequest) {
ExecutionContext executionContext = createExecutionContext(updateTableRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateTableRequest> request = null;
Response<UpdateTableResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateTableRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateTableRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateTable");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateTableResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new UpdateTableResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
@Override
public UpdateTableResult updateTable(String tableName, ProvisionedThroughput provisionedThroughput) {
return updateTable(new UpdateTableRequest().withTableName(tableName).withProvisionedThroughput(provisionedThroughput));
}
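/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): raises the provisioned throughput of a hypothetical "Music" table; this is
 * equivalent to the updateTable(String, ProvisionedThroughput) convenience overload above.
 *
 * client.updateTable(new UpdateTableRequest()
 *         .withTableName("Music")
 *         .withProvisionedThroughput(new ProvisionedThroughput(10L, 10L)));
 */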
/**
*
* Updates auto scaling settings on your global tables at once.
*
*
*
* This operation only applies to Version
* 2019.11.21 of global tables.
*
*
*
* @param updateTableReplicaAutoScalingRequest
* @return Result of the UpdateTableReplicaAutoScaling operation returned by the service.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable,
* UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateTableReplicaAutoScaling
* @see AWS API Documentation
*/
@Override
public UpdateTableReplicaAutoScalingResult updateTableReplicaAutoScaling(UpdateTableReplicaAutoScalingRequest request) {
request = beforeClientExecution(request);
return executeUpdateTableReplicaAutoScaling(request);
}
@SdkInternalApi
final UpdateTableReplicaAutoScalingResult executeUpdateTableReplicaAutoScaling(UpdateTableReplicaAutoScalingRequest updateTableReplicaAutoScalingRequest) {
ExecutionContext executionContext = createExecutionContext(updateTableReplicaAutoScalingRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateTableReplicaAutoScalingRequest> request = null;
Response<UpdateTableReplicaAutoScalingResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateTableReplicaAutoScalingRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(updateTableReplicaAutoScalingRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateTableReplicaAutoScaling");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateTableReplicaAutoScalingResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new UpdateTableReplicaAutoScalingResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
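/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): updates table-level write auto scaling for a hypothetical 2019.11.21 global
 * table named "Music". The capacity bounds and target utilization value are arbitrary assumptions.
 *
 * client.updateTableReplicaAutoScaling(new UpdateTableReplicaAutoScalingRequest()
 *         .withTableName("Music")
 *         .withProvisionedWriteCapacityAutoScalingUpdate(new AutoScalingSettingsUpdate()
 *                 .withMinimumUnits(5L)
 *                 .withMaximumUnits(100L)
 *                 .withScalingPolicyUpdate(new AutoScalingPolicyUpdate()
 *                         .withTargetTrackingScalingPolicyConfiguration(
 *                                 new AutoScalingTargetTrackingScalingPolicyConfigurationUpdate().withTargetValue(70.0)))));
 */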
/**
*
* The UpdateTimeToLive method enables or disables Time to Live (TTL) for the specified table. A
* successful UpdateTimeToLive call returns the current TimeToLiveSpecification. It can
* take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the
* same table during this one hour duration result in a ValidationException.
*
*
* TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the
* epoch time value stored in the attribute is less than the current time, the item is marked as expired and
* subsequently deleted.
*
*
*
* The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.
*
*
*
* DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data
* operations.
*
*
*
* DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item
* gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been
* deleted will still show up in reads, queries, and scans.
*
*
*
* As items are deleted, they are removed from any local secondary index and global secondary index immediately in
* the same eventually consistent way as a standard delete operation.
*
*
* For more information, see Time To Live in the Amazon
* DynamoDB Developer Guide.
*
*
* @param updateTimeToLiveRequest
* Represents the input of an UpdateTimeToLive operation.
* @return Result of the UpdateTimeToLive operation returned by the service.
* @throws ResourceInUseException
* The operation conflicts with the resource's availability. For example, you attempted to recreate an
* existing table, or tried to delete a table currently in the CREATING state.
* @throws ResourceNotFoundException
* The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE.
* @throws LimitExceededException
* There is no limit to the number of daily on-demand backups that can be taken.
*
* Up to 50 simultaneous table operations are allowed per account. These operations include
* CreateTable, UpdateTable, DeleteTable,
* UpdateTimeToLive, RestoreTableFromBackup, and
* RestoreTableToPointInTime.
*
*
* The only exception is when you are creating a table with one or more secondary indexes. You can have up
* to 25 such requests running at a time; however, if the table or index specifications are complex,
* DynamoDB might temporarily reduce the number of concurrent operations.
*
*
* There is a soft account quota of 256 tables.
* @throws InternalServerErrorException
* An error occurred on the server side.
* @sample AmazonDynamoDB.UpdateTimeToLive
* @see AWS API
* Documentation
*/
@Override
public UpdateTimeToLiveResult updateTimeToLive(UpdateTimeToLiveRequest request) {
request = beforeClientExecution(request);
return executeUpdateTimeToLive(request);
}
@SdkInternalApi
final UpdateTimeToLiveResult executeUpdateTimeToLive(UpdateTimeToLiveRequest updateTimeToLiveRequest) {
ExecutionContext executionContext = createExecutionContext(updateTimeToLiveRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<UpdateTimeToLiveRequest> request = null;
Response<UpdateTimeToLiveResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new UpdateTimeToLiveRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateTimeToLiveRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "DynamoDB");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateTimeToLive");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
if (endpointDiscoveryEnabled) {
cachedEndpoint = cache.get(awsCredentialsProvider.getCredentials().getAWSAccessKeyId(), false, endpoint);
}
HttpResponseHandler<AmazonWebServiceResponse<UpdateTimeToLiveResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new UpdateTimeToLiveResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext, cachedEndpoint, null);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
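/*
 * Illustrative usage sketch (not part of the generated source; assumes a configured AmazonDynamoDB
 * instance named client): enables TTL on a hypothetical "Music" table using a hypothetical epoch-seconds
 * attribute named "ExpiresAt".
 *
 * client.updateTimeToLive(new UpdateTimeToLiveRequest()
 *         .withTableName("Music")
 *         .withTimeToLiveSpecification(new TimeToLiveSpecification()
 *                 .withEnabled(true)
 *                 .withAttributeName("ExpiresAt")));
 */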
/**
* Returns additional metadata for a previously executed successful request, typically used for debugging issues
* where a service isn't acting as expected. This data isn't considered part of the result data returned by an
* operation, so it's available through this separate, diagnostic interface.
*
* Response metadata is only cached for a limited period of time, so if you need to access this extra diagnostic
* information for an executed request, you should use this method to retrieve it as soon as possible after
* executing the request.
*
* @param request
* The originally executed request
*
* @return The response metadata for the specified request, or null if none is available.
*/
public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
return client.getResponseMetadataForRequest(request);
}
@Override
protected final boolean calculateCRC32FromCompressedData() {
return true;
}
/**
* Normal invoke with authentication. Credentials are required and may be overridden at the request level.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
ExecutionContext executionContext) {
return invoke(request, responseHandler, executionContext, null, null);
}
/**
* Normal invoke with authentication. Credentials are required and may be overridden at the request level.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
ExecutionContext executionContext, URI cachedEndpoint, URI uriFromEndpointTrait) {
executionContext.setCredentialsProvider(CredentialUtils.getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider));
return doInvoke(request, responseHandler, executionContext, cachedEndpoint, uriFromEndpointTrait);
}
/**
* Invoke with no authentication. Credentials are not required and any credentials set on the client or request will
* be ignored for this operation.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> anonymousInvoke(Request<Y> request,
HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) {
return doInvoke(request, responseHandler, executionContext, null, null);
}
/**
* Invoke the request using the http client. Assumes credentials (or lack thereof) have been configured in the
* ExecutionContext beforehand.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> doInvoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
ExecutionContext executionContext, URI discoveredEndpoint, URI uriFromEndpointTrait) {
if (discoveredEndpoint != null) {
request.setEndpoint(discoveredEndpoint);
request.getOriginalRequest().getRequestClientOptions().appendUserAgent("endpoint-discovery");
} else if (uriFromEndpointTrait != null) {
request.setEndpoint(uriFromEndpointTrait);
} else {
request.setEndpoint(endpoint);
}
request.setTimeOffset(timeOffset);
HttpResponseHandler<AmazonServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(new JsonErrorResponseMetadata());
return client.execute(request, responseHandler, errorResponseHandler, executionContext);
}
@com.amazonaws.annotation.SdkInternalApi
static com.amazonaws.protocol.json.SdkJsonProtocolFactory getProtocolFactory() {
return protocolFactory;
}
@Override
public AmazonDynamoDBWaiters waiters() {
if (waiters == null) {
synchronized (this) {
if (waiters == null) {
waiters = new AmazonDynamoDBWaiters(this);
}
}
}
return waiters;
}
@Override
public void shutdown() {
super.shutdown();
if (waiters != null) {
waiters.shutdown();
}
if (cache != null) {
cache.shutdown();
}
}
}