/*
* Copyright 2015-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://aws.amazon.com/apache2.0
*
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.dynamodbv2.datamodeling;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.retry.RetryUtils;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.BatchLoadRetryStrategy;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.BatchWriteRetryStrategy;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.ConsistentReads;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.SaveBehavior;
import com.amazonaws.services.dynamodbv2.model.AttributeAction;
import com.amazonaws.services.dynamodbv2.model.AttributeDefinition;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemResult;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult;
import com.amazonaws.services.dynamodbv2.model.Condition;
import com.amazonaws.services.dynamodbv2.model.ConditionalCheckFailedException;
import com.amazonaws.services.dynamodbv2.model.ConditionalOperator;
import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteItemRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest;
import com.amazonaws.services.dynamodbv2.model.ExpectedAttributeValue;
import com.amazonaws.services.dynamodbv2.model.GetItemRequest;
import com.amazonaws.services.dynamodbv2.model.GetItemResult;
import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;
import com.amazonaws.services.dynamodbv2.model.KeysAndAttributes;
import com.amazonaws.services.dynamodbv2.model.PutItemRequest;
import com.amazonaws.services.dynamodbv2.model.PutItemResult;
import com.amazonaws.services.dynamodbv2.model.PutRequest;
import com.amazonaws.services.dynamodbv2.model.QueryRequest;
import com.amazonaws.services.dynamodbv2.model.QueryResult;
import com.amazonaws.services.dynamodbv2.model.ReturnValue;
import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType;
import com.amazonaws.services.dynamodbv2.model.ScanRequest;
import com.amazonaws.services.dynamodbv2.model.ScanResult;
import com.amazonaws.services.dynamodbv2.model.Select;
import com.amazonaws.services.dynamodbv2.model.UpdateItemRequest;
import com.amazonaws.services.dynamodbv2.model.UpdateItemResult;
import com.amazonaws.services.dynamodbv2.model.WriteRequest;
import com.amazonaws.services.s3.model.Region;
import com.amazonaws.util.VersionInfoUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import static com.amazonaws.services.dynamodbv2.model.KeyType.HASH;
import static com.amazonaws.services.dynamodbv2.model.KeyType.RANGE;
/**
* Object mapper for domain-object interaction with DynamoDB.
*
* To use, define a domain class that represents an item in a DynamoDB table and
* annotate it with the annotations found in the
* com.amazonaws.services.dynamodbv2.datamodeling package. In order to allow the
* mapper to correctly persist the data, each modeled property in the domain
* class should be accessible via getter and setter methods, and each property
* annotation should be either applied to the getter method or the class field.
* A minimal example using getter annotations:
*
* <pre class="brush: java">
* @DynamoDBTable(tableName = "TestTable")
* public class TestClass {
*
*     private Long key;
*     private double rangeKey;
*     private Long version;
*
*     private Set<Integer> integerSetAttribute;
*
*     @DynamoDBHashKey
*     public Long getKey() {
*         return key;
*     }
*
*     public void setKey(Long key) {
*         this.key = key;
*     }
*
*     @DynamoDBRangeKey
*     public double getRangeKey() {
*         return rangeKey;
*     }
*
*     public void setRangeKey(double rangeKey) {
*         this.rangeKey = rangeKey;
*     }
*
*     @DynamoDBAttribute(attributeName = "integerSetAttribute")
*     public Set<Integer> getIntegerAttribute() {
*         return integerSetAttribute;
*     }
*
*     public void setIntegerAttribute(Set<Integer> integerAttribute) {
*         this.integerSetAttribute = integerAttribute;
*     }
*
*     @DynamoDBVersionAttribute
*     public Long getVersion() {
*         return version;
*     }
*
*     public void setVersion(Long version) {
*         this.version = version;
*     }
* }
* </pre>
*
* Save instances of annotated classes to DynamoDB, retrieve them, and delete
* them using the {@link DynamoDBMapper} class, as in the following example.
*
* <pre class="brush: java">
* DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
* Long hashKey = 105L;
* double rangeKey = 1.0d;
* TestClass obj = mapper.load(TestClass.class, hashKey, rangeKey);
* obj.getIntegerAttribute().add(42);
* mapper.save(obj);
* mapper.delete(obj);
* </pre>
*
* If you don't have your DynamoDB table set up yet, you can use
* {@link DynamoDBMapper#generateCreateTableRequest(Class)} to construct the
* {@link CreateTableRequest} for the table represented by your annotated class.
*
* <pre class="brush: java">
* AmazonDynamoDB dynamoDBClient = new AmazonDynamoDBClient();
* DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
* CreateTableRequest req = mapper.generateCreateTableRequest(TestClass.class);
* // Table provisioned throughput is still required since it cannot be specified in your POJO
* req.setProvisionedThroughput(new ProvisionedThroughput(5L, 5L));
* // Fire off the CreateTableRequest using the low-level client
* dynamoDBClient.createTable(req);
* </pre>
*
* When using the save, load, and delete methods, {@link DynamoDBMapper} will
* throw {@link DynamoDBMappingException}s to indicate that domain classes are
* incorrectly annotated or otherwise incompatible with this class. Service
* exceptions will always be propagated as {@link SdkClientException}, and
* DynamoDB-specific subclasses such as {@link ConditionalCheckFailedException}
* will be used when possible.
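*
* For example, when saving an object whose class declares a
* {@link DynamoDBVersionAttribute}, a concurrent modification surfaces as a
* {@link ConditionalCheckFailedException}. An illustrative sketch using the
* TestClass above:
*
* <pre class="brush: java">
* try {
*     mapper.save(obj);
* } catch (ConditionalCheckFailedException e) {
*     // Another writer updated the item; reload to get the latest version.
*     obj = mapper.load(TestClass.class, obj.getKey(), obj.getRangeKey());
* }
* </pre>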
*
* This class is thread-safe and can be shared between threads. It's also very
* lightweight, so it doesn't need to be.
*
* @see DynamoDBTable
* @see DynamoDBHashKey
* @see DynamoDBRangeKey
* @see DynamoDBAutoGeneratedKey
* @see DynamoDBAttribute
* @see DynamoDBVersionAttribute
* @see DynamoDBIgnore
* @see DynamoDBMarshalling
* @see DynamoDBMapperConfig
*/
public class DynamoDBMapper extends AbstractDynamoDBMapper {
private final AmazonDynamoDB db;
private final DynamoDBMapperModelFactory models;
private final S3Link.Factory s3Links;
private final AttributeTransformer transformer;
/**
* The max back-off time for batch get. The configuration for batch write
* has been moved to DynamoDBMapperConfig.
*/
static final long MAX_BACKOFF_IN_MILLISECONDS = 1000 * 3;
/** The max number of items allowed in a BatchWrite request */
static final int MAX_ITEMS_PER_BATCH = 25;
/**
* This retry count is applicable only when every batch get item request
* results in no data retrieved from the server, and the unprocessed keys
* are the same as the request items.
*/
static final int BATCH_GET_MAX_RETRY_COUNT_ALL_KEYS = 5;
/**
* User agent for requests made using the {@link DynamoDBMapper}.
*/
private static final String USER_AGENT =
DynamoDBMapper.class.getName() + "/" + VersionInfoUtils.getVersion();
private static final String USER_AGENT_BATCH_OPERATION =
DynamoDBMapper.class.getName() + "_batch_operation/" + VersionInfoUtils.getVersion();
private static final Log log = LogFactory.getLog(DynamoDBMapper.class);
/**
* Fail fast when trying to create a subclass of the DynamoDBMapper that
* attempts to override one of the old {@code transformAttributes} methods.
*/
private static void failFastOnIncompatibleSubclass(Class<?> clazz) {
while (clazz != DynamoDBMapper.class) {
Class<?>[] classOverride = new Class<?>[] {
Class.class,
Map.class
};
Class<?>[] nameOverride = new Class<?>[] {
String.class,
String.class,
Map.class
};
for (Method method : clazz.getDeclaredMethods()) {
if (method.getName().equals("transformAttributes")) {
Class<?>[] params = method.getParameterTypes();
if (Arrays.equals(params, classOverride)
|| Arrays.equals(params, nameOverride)) {
throw new IllegalStateException(
"The deprecated transformAttributes method is "
+ "no longer supported as of 1.9.0. Use an "
+ "AttributeTransformer to inject custom "
+ "attribute transformation logic.");
}
}
}
clazz = clazz.getSuperclass();
}
}
/**
* Constructs a new mapper with the service object given, using the default
* configuration.
*
* @param dynamoDB
* The service object to use for all service calls.
* @see DynamoDBMapperConfig#DEFAULT
*/
public DynamoDBMapper(final AmazonDynamoDB dynamoDB) {
this(dynamoDB, DynamoDBMapperConfig.DEFAULT, null, null);
}
/**
* Constructs a new mapper with the service object and configuration given.
*
* @param dynamoDB
* The service object to use for all service calls.
* @param config
* The default configuration to use for all service calls. It can
* be overridden on a per-operation basis.
*/
public DynamoDBMapper(
final AmazonDynamoDB dynamoDB,
final DynamoDBMapperConfig config) {
this(dynamoDB, config, null, null);
}
/**
* Constructs a new mapper with the service object and S3 client cache
* given, using the default configuration.
*
* @param ddb
* The service object to use for all service calls.
* @param s3CredentialProvider
* The credentials provider for accessing S3.
* Relevant only if {@link S3Link} is involved.
* @see DynamoDBMapperConfig#DEFAULT
*/
public DynamoDBMapper(
final AmazonDynamoDB ddb,
final AWSCredentialsProvider s3CredentialProvider) {
this(ddb, DynamoDBMapperConfig.DEFAULT, s3CredentialProvider);
}
/**
* Constructs a new mapper with the given service object, configuration,
* and transform hook.
*
* @param dynamoDB
* the service object to use for all service calls
* @param config
* the default configuration to use for all service calls. It
* can be overridden on a per-operation basis
* @param transformer
* The custom attribute transformer to invoke when serializing or
* deserializing an object.
*/
public DynamoDBMapper(
final AmazonDynamoDB dynamoDB,
final DynamoDBMapperConfig config,
final AttributeTransformer transformer) {
this(dynamoDB, config, transformer, null);
}
/**
* Constructs a new mapper with the service object, configuration, and S3
* client cache given.
*
* @param dynamoDB
* The service object to use for all service calls.
* @param config
* The default configuration to use for all service calls. It can
* be overridden on a per-operation basis.
* @param s3CredentialProvider
* The credentials provider for accessing S3.
* Relevant only if {@link S3Link} is involved.
*/
public DynamoDBMapper(
final AmazonDynamoDB dynamoDB,
final DynamoDBMapperConfig config,
final AWSCredentialsProvider s3CredentialProvider) {
this(dynamoDB, config, null, validate(s3CredentialProvider));
}
/**
* Throws an exception if the given credentials provider is {@code null}.
*/
private static AWSCredentialsProvider validate(
final AWSCredentialsProvider provider) {
if (provider == null) {
throw new IllegalArgumentException(
"s3 credentials provider must not be null");
}
return provider;
}
/**
* Constructor with all parameters.
*
* @param dynamoDB
* The service object to use for all service calls.
* @param config
* The default configuration to use for all service calls. It can
* be overridden on a per-operation basis.
* @param transformer
* The custom attribute transformer to invoke when serializing or
* deserializing an object.
* @param s3CredentialsProvider
* The credentials provider for accessing S3.
* Relevant only if {@link S3Link} is involved.
*/
public DynamoDBMapper(
final AmazonDynamoDB dynamoDB,
final DynamoDBMapperConfig config,
final AttributeTransformer transformer,
final AWSCredentialsProvider s3CredentialsProvider) {
super(config);
failFastOnIncompatibleSubclass(getClass());
this.db = dynamoDB;
this.transformer = transformer;
this.s3Links = S3Link.Factory.of(s3CredentialsProvider);
this.models = StandardModelFactories.of(this.s3Links);
}
@Override
public <T> DynamoDBMapperTableModel<T> getTableModel(Class<T> clazz, DynamoDBMapperConfig config) {
return this.models.getTableFactory(config).getTable(clazz);
}
@Override
public <T> T load(T keyObject, DynamoDBMapperConfig config) {
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>) keyObject.getClass();
config = mergeConfig(config);
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, config);
String tableName = getTableName(clazz, keyObject, config);
GetItemRequest rq = new GetItemRequest()
.withRequestMetricCollector(config.getRequestMetricCollector());
Map<String, AttributeValue> key = model.convertKey(keyObject);
rq.setKey(key);
rq.setTableName(tableName);
rq.setConsistentRead(config.getConsistentReads() == ConsistentReads.CONSISTENT);
GetItemResult item = db.getItem(applyUserAgent(rq));
Map<String, AttributeValue> itemAttributes = item.getItem();
if ( itemAttributes == null ) {
return null;
}
T object = privateMarshallIntoObject(
toParameters(itemAttributes, clazz, tableName, config));
return object;
}
@Override
public <T> T load(Class<T> clazz, Object hashKey, Object rangeKey, DynamoDBMapperConfig config) {
config = mergeConfig(config);
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, config);
T keyObject = model.createKey(hashKey, rangeKey);
return load(keyObject, config);
}
@Override
public <T> T marshallIntoObject(Class<T> clazz, Map<String, AttributeValue> itemAttributes, DynamoDBMapperConfig config) {
config = mergeConfig(config);
String tableName = getTableName(clazz, config);
return privateMarshallIntoObject(
toParameters(itemAttributes, clazz, tableName, config));
}
/**
* The one true implementation of marshallIntoObject.
*/
private <T> T privateMarshallIntoObject(
AttributeTransformer.Parameters<T> parameters) {
Class<T> clazz = parameters.getModelClass();
Map<String, AttributeValue> values = untransformAttributes(parameters);
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, parameters.getMapperConfig());
return model.unconvert(values);
}
@Override
public <T> List<T> marshallIntoObjects(Class<T> clazz, List<Map<String, AttributeValue>> itemAttributes, DynamoDBMapperConfig config) {
config = mergeConfig(config);
List<T> result = new ArrayList<T>(itemAttributes.size());
for (Map<String, AttributeValue> item : itemAttributes) {
result.add(marshallIntoObject(clazz, item));
}
return result;
}
/**
* A replacement for {@link #marshallIntoObjects(Class, List)} that takes
* an extra set of parameters to be tunneled through to
* {@code privateMarshalIntoObject} (if nothing along the way is
* overridden). It's package-private because some of the Paginated*List
* classes call back into it, but final because no one, even in this
* package, should ever override it.
*/
final <T> List<T> marshallIntoObjects(
final List<AttributeTransformer.Parameters<T>> parameters
) {
List<T> result = new ArrayList<T>(parameters.size());
for (AttributeTransformer.Parameters<T> entry : parameters) {
result.add(privateMarshallIntoObject(entry));
}
return result;
}
@Override
public <T> void save(T object,
DynamoDBSaveExpression saveExpression,
final DynamoDBMapperConfig config) {
final DynamoDBMapperConfig finalConfig = mergeConfig(config);
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>) object.getClass();
String tableName = getTableName(clazz, object, finalConfig);
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, finalConfig);
/*
* We force a putItem request instead of updateItem request either when
* CLOBBER is configured, or part of the primary key of the object needs
* to be auto-generated.
*/
boolean forcePut = (finalConfig.getSaveBehavior() == SaveBehavior.CLOBBER)
|| anyKeyGeneratable(model, object, finalConfig.getSaveBehavior());
SaveObjectHandler saveObjectHandler;
if (forcePut) {
saveObjectHandler = this.new SaveObjectHandler(clazz, object,
tableName, finalConfig, saveExpression) {
@Override
protected void onPrimaryKeyAttributeValue(String attributeName,
AttributeValue keyAttributeValue) {
/* Treat key values as common attribute value updates. */
getAttributeValueUpdates().put(attributeName,
new AttributeValueUpdate().withValue(keyAttributeValue)
.withAction("PUT"));
}
/* Use default implementation of onNonKeyAttribute(...) */
@Override
protected void onNullNonKeyAttribute(String attributeName) {
/* When doing a force put, we can safely ignore the null-valued attributes. */
return;
}
@Override
protected void executeLowLevelRequest() {
/* Send a putItem request */
doPutItem();
}
};
} else {
saveObjectHandler = this.new SaveObjectHandler(clazz, object,
tableName, finalConfig, saveExpression) {
@Override
protected void onPrimaryKeyAttributeValue(String attributeName,
AttributeValue keyAttributeValue) {
/* Put it in the key collection which is later used in the updateItem request. */
getPrimaryKeyAttributeValues().put(attributeName, keyAttributeValue);
}
@Override
protected void onNonKeyAttribute(String attributeName,
AttributeValue currentValue) {
/* If it's a set attribute and the mapper is configured with APPEND_SET,
* we do an "ADD" update instead of the default "PUT".
*/
if (getLocalSaveBehavior() == SaveBehavior.APPEND_SET) {
if (currentValue.getBS() != null
|| currentValue.getNS() != null
|| currentValue.getSS() != null) {
getAttributeValueUpdates().put(
attributeName,
new AttributeValueUpdate().withValue(
currentValue).withAction("ADD"));
return;
}
}
/* Otherwise, we do the default "PUT" update. */
super.onNonKeyAttribute(attributeName, currentValue);
}
@Override
protected void onNullNonKeyAttribute(String attributeName) {
/*
* If UPDATE_SKIP_NULL_ATTRIBUTES or APPEND_SET is
* configured, we don't delete null value attributes.
*/
if (getLocalSaveBehavior() == SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES
|| getLocalSaveBehavior() == SaveBehavior.APPEND_SET) {
return;
}
else {
/* Delete attributes that are set as null in the object. */
getAttributeValueUpdates()
.put(attributeName,
new AttributeValueUpdate()
.withAction("DELETE"));
}
}
@Override
protected void executeLowLevelRequest() {
UpdateItemResult updateItemResult = doUpdateItem();
// The UpdateItem request is specified to return ALL_NEW
// attributes of the affected item. So if the returned
// UpdateItemResult does not include any ReturnedAttributes,
// it indicates the UpdateItem failed silently (e.g. the
// key-only-put nightmare -
// https://forums.aws.amazon.com/thread.jspa?threadID=86798&tstart=25),
// in which case we should re-send a PutItem
// request instead.
if (updateItemResult.getAttributes() == null
|| updateItemResult.getAttributes().isEmpty()) {
// Before we proceed with PutItem, we need to put all
// the key attributes (prepared for the
// UpdateItemRequest) into the AttributeValueUpdates
// collection.
for (String keyAttributeName : getPrimaryKeyAttributeValues().keySet()) {
getAttributeValueUpdates().put(keyAttributeName,
new AttributeValueUpdate()
.withValue(getPrimaryKeyAttributeValues().get(keyAttributeName))
.withAction("PUT"));
}
doPutItem();
}
}
};
}
saveObjectHandler.execute();
}
/**
* The handler for saving an object using DynamoDBMapper. Callers should
* implement the abstract methods to provide the expected behavior in each
* scenario, and this handler will take care of all the other basic workflow
* and common operations.
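* <p>
* A minimal sketch of a concrete handler (illustrative only; the two
* anonymous subclasses created in save(...) above are the real
* implementations used by the mapper):
*
* <pre class="brush: java">
* SaveObjectHandler handler = this.new SaveObjectHandler(
*         clazz, object, tableName, config, saveExpression) {
*     protected void onPrimaryKeyAttributeValue(String name, AttributeValue value) {
*         getPrimaryKeyAttributeValues().put(name, value);
*     }
*     protected void onNullNonKeyAttribute(String name) {
*         // e.g. skip null-valued attributes entirely
*     }
*     protected void executeLowLevelRequest() {
*         doUpdateItem();
*     }
* };
* handler.execute();
* </pre>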
*/
protected abstract class SaveObjectHandler {
protected final Object object;
protected final Class> clazz;
private final String tableName;
private final DynamoDBMapperConfig saveConfig;
private final Map<String, AttributeValue> primaryKeys;
private final Map<String, AttributeValueUpdate> updateValues;
/**
* Any expected value conditions specified by the implementation of
* DynamoDBMapper, e.g. value assertions on versioned attributes.
*/
private final Map<String, ExpectedAttributeValue> internalExpectedValueAssertions;
/**
* Additional expected value conditions specified by the user.
*/
protected final Map<String, ExpectedAttributeValue> userProvidedExpectedValueConditions;
/**
* Condition operator on the additional expected value conditions
* specified by the user.
*/
protected final String userProvidedConditionOperator;
private final List<ValueUpdate> inMemoryUpdates;
/**
* Constructs a handler for saving the specified model object.
*
* @param object The model object to be saved.
* @param clazz The domain class of the object.
* @param tableName The table name.
* @param saveConfig The mapper configuration used for this save.
* @param saveExpression The save expression, including the user-provided conditions and an optional logic operator.
*/
public SaveObjectHandler(
Class> clazz,
Object object,
String tableName,
DynamoDBMapperConfig saveConfig,
DynamoDBSaveExpression saveExpression) {
this.clazz = clazz;
this.object = object;
this.tableName = tableName;
this.saveConfig = saveConfig;
if (saveExpression != null) {
userProvidedExpectedValueConditions = saveExpression
.getExpected();
userProvidedConditionOperator = saveExpression
.getConditionalOperator();
} else {
userProvidedExpectedValueConditions = null;
userProvidedConditionOperator = null;
}
updateValues = new HashMap<String, AttributeValueUpdate>();
internalExpectedValueAssertions = new HashMap<String, ExpectedAttributeValue>();
inMemoryUpdates = new LinkedList<ValueUpdate>();
primaryKeys = new HashMap<String, AttributeValue>();
}
/**
* The general workflow of a save operation.
*/
public void execute() {
final DynamoDBMapperTableModel<Object> model = getTableModel((Class<Object>)clazz, saveConfig);
for ( final DynamoDBMapperFieldModel<Object,Object> field : model.fields() ) {
if ( canGenerate(model, object, getLocalSaveBehavior(), field) ) {
if ( field.keyType() != null || field.indexed() ) {
onAutoGenerateAssignableKey(field);
} else if ( field.versioned() ) {
onVersionAttribute(field);
} else {
onAutoGenerate(field);
}
} else if ( field.keyType() != null ) {
AttributeValue newAttributeValue = field.convert(field.get(object));
if ( newAttributeValue == null ) {
throw new DynamoDBMappingException(
clazz.getSimpleName() + "[" + field.name() + "]; null or empty value for primary key"
);
}
onPrimaryKeyAttributeValue(field.name(), newAttributeValue);
} else {
AttributeValue currentValue = field.convert(field.get(object));
if ( currentValue != null ) {
onNonKeyAttribute(field.name(), currentValue);
} else {
onNullNonKeyAttribute(field.name());
}
}
}
/*
* Execute the implementation of the low level request.
*/
executeLowLevelRequest();
/*
* Finally, after the service call has succeeded, update the
* in-memory object with new field values as appropriate. This
* currently takes into account auto-generated keys and versioned
* attributes.
*/
for ( ValueUpdate update : inMemoryUpdates ) {
update.apply();
}
}
/**
* Implement this method to do the necessary operations when a primary key
* attribute is set with some value.
*
* @param attributeName
* The name of the primary key attribute.
* @param keyAttributeValue
* The AttributeValue of the primary key attribute as specified in
* the object.
*/
protected abstract void onPrimaryKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue);
/**
* Implement this method to perform the necessary operations when a non-key
* attribute is set to a non-null value in the object.
* The default implementation simply adds a "PUT" update for the given attribute.
*
* @param attributeName
* The name of the non-key attribute.
* @param currentValue
* The updated value of the given attribute.
*/
protected void onNonKeyAttribute(String attributeName, AttributeValue currentValue) {
updateValues.put(attributeName, new AttributeValueUpdate()
.withValue(currentValue).withAction("PUT"));
}
/**
* Implement this method to perform the necessary operations when a non-key
* attribute is set to null in the object.
*
* @param attributeName
* The name of the non-key attribute.
*/
protected abstract void onNullNonKeyAttribute(String attributeName);
/**
* Implement this method to send the low-level request that is necessary
* to complete the save operation.
*/
protected abstract void executeLowLevelRequest();
/** Get the SaveBehavior used locally for this save operation. **/
protected SaveBehavior getLocalSaveBehavior() {
return saveConfig.getSaveBehavior();
}
/** Get the table name **/
protected String getTableName() {
return tableName;
}
/** Get the map of all the specified primary keys of the saved object. **/
protected Map<String, AttributeValue> getPrimaryKeyAttributeValues() {
return primaryKeys;
}
/** Get the map of AttributeValueUpdate on each modeled attribute. **/
protected Map<String, AttributeValueUpdate> getAttributeValueUpdates() {
return updateValues;
}
/**
* Merge and return all the expected value conditions (either
* user-specified or imposed by the internal implementation of
* DynamoDBMapper) for this save operation.
*/
protected Map<String, ExpectedAttributeValue> mergeExpectedAttributeValueConditions() {
return DynamoDBMapper.mergeExpectedAttributeValueConditions(
internalExpectedValueAssertions,
userProvidedExpectedValueConditions,
userProvidedConditionOperator);
}
/** Get the list of all the necessary in-memory updates on the object. **/
protected List<ValueUpdate> getInMemoryUpdates() {
return inMemoryUpdates;
}
/**
* Save the item using an UpdateItem request. The handler will call this
* method if
* <ul>
* <li>the CLOBBER configuration is not being used;
* <li>AND the item does not contain auto-generated key values.
* </ul>
* <p>
* The ReturnedValues parameter for the UpdateItem request is set as
* ALL_NEW, which means the service should return all of the attributes
* of the new version of the item after the update. The handler will use
* the returned attributes to detect silent failure on the server-side.
*/
protected UpdateItemResult doUpdateItem() {
UpdateItemRequest req = new UpdateItemRequest()
.withTableName(getTableName())
.withKey(getPrimaryKeyAttributeValues())
.withAttributeUpdates(
transformAttributeUpdates(
this.clazz,
getTableName(),
getPrimaryKeyAttributeValues(),
getAttributeValueUpdates(),
saveConfig))
.withExpected(mergeExpectedAttributeValueConditions())
.withConditionalOperator(userProvidedConditionOperator)
.withReturnValues(ReturnValue.ALL_NEW)
.withRequestMetricCollector(saveConfig.getRequestMetricCollector());
return db.updateItem(applyUserAgent(req));
}
/**
* Save the item using a PutItem request. The handler will call this
* method if
* <ul>
* <li>the CLOBBER configuration is being used;
* <li>OR the item contains auto-generated key values;
* <li>OR an UpdateItem request has silently failed (200 response with
* no affected attributes), which indicates the key-only-put scenario
* that we used to handle by the keyOnlyPut(...) hack.
* </ul>
*/
protected PutItemResult doPutItem() {
Map<String, AttributeValue> attributeValues = convertToItem(getAttributeValueUpdates());
attributeValues = transformAttributes(
toParameters(attributeValues,
this.clazz,
getTableName(),
saveConfig));
PutItemRequest req = new PutItemRequest()
.withTableName(getTableName())
.withItem(attributeValues)
.withExpected(mergeExpectedAttributeValueConditions())
.withConditionalOperator(userProvidedConditionOperator)
.withRequestMetricCollector(saveConfig.getRequestMetricCollector());
return db.putItem(applyUserAgent(req));
}
/**
* Auto-generates the attribute value.
* @param field The field model of the auto-generated attribute.
*/
private void onAutoGenerate(DynamoDBMapperFieldModel<Object,Object> field) {
AttributeValue value = field.convert(field.generate(field.get(object)));
updateValues.put(field.name(), new AttributeValueUpdate().withAction("PUT").withValue(value));
inMemoryUpdates.add(new ValueUpdate(field, value, object));
}
/**
* Auto-generates the key.
*/
private void onAutoGenerateAssignableKey(DynamoDBMapperFieldModel<Object,Object> field) {
// Generate the new key value first, then ensure it doesn't exist.
onAutoGenerate(field);
if (getLocalSaveBehavior() != SaveBehavior.CLOBBER
&& !internalExpectedValueAssertions.containsKey(field.name())
&& field.getGenerateStrategy() != DynamoDBAutoGenerateStrategy.ALWAYS) {
// Add an expect clause to make sure that the item
// doesn't already exist, since it's supposed to be new
internalExpectedValueAssertions.put(field.name(),
new ExpectedAttributeValue().withExists(false));
}
}
/**
* Auto-generates the version.
* @param field The field model of the versioned attribute.
*/
private void onVersionAttribute(DynamoDBMapperFieldModel<Object,Object> field) {
if ( getLocalSaveBehavior() != SaveBehavior.CLOBBER
&& !internalExpectedValueAssertions.containsKey(field.name())) {
// First establish the expected (current) value for the
// update call
// For new objects, insist that the value doesn't exist.
// For existing ones, insist it has the old value.
final Object current = field.get(object);
if (current == null) {
internalExpectedValueAssertions.put(field.name(),
new ExpectedAttributeValue().withExists(false));
} else {
internalExpectedValueAssertions.put(field.name(),
new ExpectedAttributeValue().withExists(true).withValue(field.convert(current)));
}
}
// Generate the new version value
onAutoGenerate(field);
}
/**
* Converts the {@link AttributeValueUpdate} map given to an equivalent
* {@link AttributeValue} map.
*/
private Map<String, AttributeValue> convertToItem(Map<String, AttributeValueUpdate> putValues) {
Map<String, AttributeValue> map = new HashMap<String, AttributeValue>();
for ( Entry<String, AttributeValueUpdate> entry : putValues.entrySet() ) {
String attributeName = entry.getKey();
AttributeValue attributeValue = entry.getValue().getValue();
String attributeAction = entry.getValue().getAction();
/*
* AttributeValueUpdate allows nulls for its values, since they are
* semantically meaningful. AttributeValues never have null values.
*/
if ( attributeValue != null
&& !AttributeAction.DELETE.toString().equals(attributeAction)) {
map.put(attributeName, attributeValue);
}
}
return map;
}
private Map<String, AttributeValueUpdate> transformAttributeUpdates(
final Class<?> clazz,
final String tableName,
final Map<String, AttributeValue> keys,
final Map<String, AttributeValueUpdate> updateValues,
final DynamoDBMapperConfig config
) {
Map<String, AttributeValue> item = convertToItem(updateValues);
HashSet<String> keysAdded = new HashSet<String>();
for (Map.Entry<String, AttributeValue> e : keys.entrySet()) {
if (!item.containsKey(e.getKey())) {
keysAdded.add(e.getKey());
item.put(e.getKey(), e.getValue());
}
}
AttributeTransformer.Parameters<?> parameters =
toParameters(item, true, clazz, tableName, config);
String hashKey = parameters.getHashKeyName();
if (!item.containsKey(hashKey)) {
item.put(hashKey, keys.get(hashKey));
}
item = transformAttributes(parameters);
for (Map.Entry<String, AttributeValue> entry : item.entrySet()) {
if (keysAdded.contains(entry.getKey())) {
// This was added in for context before calling
// transformAttributes, but isn't actually being changed.
continue;
}
AttributeValueUpdate update = updateValues.get(entry.getKey());
if (update != null) {
update.getValue()
.withB(entry.getValue().getB())
.withBS(entry.getValue().getBS())
.withN(entry.getValue().getN())
.withNS(entry.getValue().getNS())
.withS(entry.getValue().getS())
.withSS(entry.getValue().getSS())
.withM(entry.getValue().getM())
.withL(entry.getValue().getL())
.withNULL(entry.getValue().getNULL())
.withBOOL(entry.getValue().getBOOL());
} else {
updateValues.put(entry.getKey(),
new AttributeValueUpdate(entry.getValue(),
"PUT"));
}
}
return updateValues;
}
}
@Override
public <T> void delete(T object, DynamoDBDeleteExpression deleteExpression, DynamoDBMapperConfig config) {
config = mergeConfig(config);
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>) object.getClass();
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, config);
String tableName = getTableName(clazz, object, config);
Map<String, AttributeValue> key = model.convertKey(object);
/*
* If there is a version field, make sure we assert its value. If the
* version field is null (only should happen in unusual circumstances),
* pretend it doesn't have a version field after all.
*/
Map<String, ExpectedAttributeValue> internalAssertions = new HashMap<String, ExpectedAttributeValue>();
if ( config.getSaveBehavior() != SaveBehavior.CLOBBER && model.versioned() ) {
for ( final DynamoDBMapperFieldModel<T,Object> field : model.versions() ) {
final AttributeValue current = field.getAndConvert(object);
if (current == null) {
internalAssertions.put(field.name(), new ExpectedAttributeValue(false));
} else {
internalAssertions.put(field.name(), new ExpectedAttributeValue(true).withValue(current));
}
break;
}
}
DeleteItemRequest req = new DeleteItemRequest().withKey(key)
.withTableName(tableName).withExpected(internalAssertions)
.withRequestMetricCollector(config.getRequestMetricCollector());
if (deleteExpression != null) {
String conditionalExpression = deleteExpression.getConditionExpression();
if (conditionalExpression != null) {
if (internalAssertions != null && !internalAssertions.isEmpty()) {
throw new SdkClientException(
"Condition Expressions cannot be used if a versioned attribute is present");
}
req = req
.withConditionExpression(conditionalExpression)
.withExpressionAttributeNames(
deleteExpression.getExpressionAttributeNames())
.withExpressionAttributeValues(
deleteExpression.getExpressionAttributeValues());
}
req = req.withExpected(
mergeExpectedAttributeValueConditions(internalAssertions,
deleteExpression.getExpected(),
deleteExpression.getConditionalOperator()))
.withConditionalOperator(
deleteExpression.getConditionalOperator());
}
db.deleteItem(applyUserAgent(req));
}
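/**
* Saves and deletes the objects given using one or more calls to the
* batchWriteItem API. A usage sketch (illustrative; failed batches are
* returned for inspection rather than thrown):
*
* <pre class="brush: java">
* List<FailedBatch> failed = mapper.batchWrite(
*         Arrays.asList(objToSave1, objToSave2),  // objects to save
*         Arrays.asList(objToDelete));            // objects to delete
* for (FailedBatch batch : failed) {
*     // inspect batch.getUnprocessedItems() and batch.getException()
* }
* </pre>
*/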
@Override
public List<FailedBatch> batchWrite(Iterable<? extends Object> objectsToWrite,
Iterable<? extends Object> objectsToDelete,
DynamoDBMapperConfig config) {
config = mergeConfig(config);
List<FailedBatch> totalFailedBatches = new LinkedList<FailedBatch>();
StringListMap<WriteRequest> requestItems = new StringListMap<WriteRequest>();
List<ValueUpdate> inMemoryUpdates = new LinkedList<ValueUpdate>();
for ( Object toWrite : objectsToWrite ) {
Class<Object> clazz = (Class<Object>)toWrite.getClass();
String tableName = getTableName(clazz, toWrite, config);
Map<String, AttributeValue> attributeValues = new HashMap<String, AttributeValue>();
// Look at every getter and construct a value object for it
final DynamoDBMapperTableModel<Object> model = getTableModel(clazz, config);
for ( final DynamoDBMapperFieldModel<Object,Object> field : model.fields() ) {
AttributeValue currentValue = null;
if ( canGenerate(model, toWrite, config.getSaveBehavior(), field) && !field.versioned() ) {
currentValue = field.convert(field.generate(field.get(toWrite)));
inMemoryUpdates.add(new ValueUpdate(field, currentValue, toWrite));
} else {
currentValue = field.convert(field.get(toWrite));
}
if ( currentValue != null ) {
attributeValues.put(field.name(), currentValue);
}
}
if ( !requestItems.containsKey(tableName) ) {
requestItems.put(tableName, new LinkedList<WriteRequest>());
}
AttributeTransformer.Parameters<?> parameters =
toParameters(attributeValues, clazz, tableName, config);
requestItems.add(tableName, new WriteRequest(new PutRequest(transformAttributes(parameters))));
}
for ( Object toDelete : objectsToDelete ) {
Class<Object> clazz = (Class<Object>)toDelete.getClass();
String tableName = getTableName(clazz, toDelete, config);
final DynamoDBMapperTableModel<Object> model = getTableModel(clazz, config);
Map<String, AttributeValue> key = model.convertKey(toDelete);
requestItems.add(tableName, new WriteRequest(new DeleteRequest(key)));
}
// Break into chunks of 25 items and make service requests to DynamoDB
for (final StringListMap<WriteRequest> batch : requestItems.subMaps(MAX_ITEMS_PER_BATCH, true)) {
List<FailedBatch> failedBatches = writeOneBatch(batch, config.getBatchWriteRetryStrategy());
if (failedBatches != null) {
totalFailedBatches.addAll(failedBatches);
// If the failed batches contain a throttling exception, back off before continuing
if (containsThrottlingException(failedBatches)) {
pause(config.getBatchWriteRetryStrategy().getDelayBeforeRetryUnprocessedItems(
Collections.unmodifiableMap(batch), 0));
}
}
}
// Once the entire batch is processed, update assigned keys in memory
for ( ValueUpdate update : inMemoryUpdates ) {
update.apply();
}
return totalFailedBatches;
}
/**
* Process one batch of requests (max 25). The batch is divided into
* smaller parts if the service rejects the request as too large (i.e. the
* total size of the request exceeds the 1 MB limit).
*/
private List<FailedBatch> writeOneBatch(
StringListMap<WriteRequest> batch,
BatchWriteRetryStrategy batchWriteRetryStrategy) {
List<FailedBatch> failedBatches = new LinkedList<FailedBatch>();
FailedBatch failedBatch = doBatchWriteItemWithRetry(batch, batchWriteRetryStrategy);
if (failedBatch != null) {
// If the exception is request entity too large, we divide the batch
// into smaller parts.
if (failedBatch.isRequestEntityTooLarge()) {
// If only one item is left, that item alone must exceed the 64k
// size limit, so the batch cannot be divided any further.
if (failedBatch.size() == 1) {
failedBatches.add(failedBatch);
} else {
for (final StringListMap<WriteRequest> subBatch : batch.subMaps(2, false)) {
failedBatches.addAll(writeOneBatch(subBatch, batchWriteRetryStrategy));
}
}
} else {
failedBatches.add(failedBatch);
}
}
return failedBatches;
}
/**
* Check whether the failed batches contain any throttling exception.
*/
private boolean containsThrottlingException(List<FailedBatch> failedBatches) {
for (FailedBatch failedBatch : failedBatches) {
if (failedBatch.isThrottling()) {
return true;
}
}
return false;
}
/**
* Continue trying to process the batch, retrying on UnprocessedItems
* according to the specified BatchWriteRetryStrategy.
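* <p>
* The retry strategy comes from the mapper configuration; a sketch,
* assuming the SDK-provided no-retry strategy is desired:
*
* <pre class="brush: java">
* DynamoDBMapperConfig config = DynamoDBMapperConfig.builder()
*         .withBatchWriteRetryStrategy(
*                 new DynamoDBMapperConfig.NoRetryBatchWriteRetryStrategy())
*         .build();
* DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient, config);
* </pre>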
*/
private FailedBatch doBatchWriteItemWithRetry(
Map<String, List<WriteRequest>> batch,
BatchWriteRetryStrategy batchWriteRetryStrategy) {
BatchWriteItemResult result = null;
int retries = 0;
int maxRetries = batchWriteRetryStrategy
.getMaxRetryOnUnprocessedItems(Collections
.unmodifiableMap(batch));
FailedBatch failedBatch = null;
Map<String, List<WriteRequest>> pendingItems = batch;
while (true) {
try {
result = db.batchWriteItem(applyBatchOperationUserAgent(
new BatchWriteItemRequest().withRequestItems(pendingItems)));
} catch (Exception e) {
failedBatch = new FailedBatch();
failedBatch.setUnprocessedItems(pendingItems);
failedBatch.setException(e);
return failedBatch;
}
pendingItems = result.getUnprocessedItems();
if (pendingItems.size() > 0) {
// return pendingItems as a FailedBatch if we have exceeded max retry
if (maxRetries >= 0 && retries >= maxRetries) {
failedBatch = new FailedBatch();
failedBatch.setUnprocessedItems(pendingItems);
failedBatch.setException(null);
return failedBatch;
}
pause(batchWriteRetryStrategy.getDelayBeforeRetryUnprocessedItems(
Collections.unmodifiableMap(pendingItems), retries));
retries++;
} else {
break;
}
}
return failedBatch;
}
@Override
public Map<String, List<Object>> batchLoad(Iterable<? extends Object> itemsToGet, DynamoDBMapperConfig config) {
config = mergeConfig(config);
boolean consistentReads = (config.getConsistentReads() == ConsistentReads.CONSISTENT);
if (itemsToGet == null) {
return new HashMap<String, List<Object>>();
}
Map<String, KeysAndAttributes> requestItems = new HashMap<String, KeysAndAttributes>();
Map<String, Class<?>> classesByTableName = new HashMap<String, Class<?>>();
Map<String, List<Object>> resultSet = new HashMap<String, List<Object>>();
int count = 0;
for ( Object keyObject : itemsToGet ) {
Class<Object> clazz = (Class<Object>)keyObject.getClass();
final DynamoDBMapperTableModel<Object> model = getTableModel(clazz, config);
String tableName = getTableName(clazz, keyObject, config);
classesByTableName.put(tableName, clazz);
if ( !requestItems.containsKey(tableName) ) {
requestItems.put(
tableName,
new KeysAndAttributes().withConsistentRead(consistentReads).withKeys(
new LinkedList<Map<String, AttributeValue>>()));
}
requestItems.get(tableName).getKeys().add(model.convertKey(keyObject));
// Reached the maximum number of keys that can be handled in a single batchGet request
if ( ++count == 100 ) {
processBatchGetRequest(classesByTableName, requestItems, resultSet, config);
requestItems.clear();
count = 0;
}
}
if ( count > 0 ) {
processBatchGetRequest(classesByTableName, requestItems, resultSet, config);
}
return resultSet;
}
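/**
* Retrieves multiple items from multiple tables using their primary keys.
* A usage sketch (illustrative; a KeyPair carries the hash and range key
* values for one item):
*
* <pre class="brush: java">
* Map<Class<?>, List<KeyPair>> itemsToGet = new HashMap<Class<?>, List<KeyPair>>();
* itemsToGet.put(TestClass.class, Arrays.asList(
*         new KeyPair().withHashKey(105L).withRangeKey(1.0d)));
* Map<String, List<Object>> items = mapper.batchLoad(itemsToGet);
* </pre>
*/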
@Override
public Map<String, List<Object>> batchLoad(Map<Class<?>, List<KeyPair>> itemsToGet, DynamoDBMapperConfig config) {
config = mergeConfig(config);
List<Object> keys = new ArrayList<Object>();
if ( itemsToGet != null ) {
for ( Class<?> clazz : itemsToGet.keySet() ) {
if ( itemsToGet.get(clazz) != null ) {
final DynamoDBMapperTableModel<?> model = getTableModel(clazz, config);
for ( KeyPair keyPair : itemsToGet.get(clazz) ) {
keys.add(model.createKey(keyPair.getHashKey(), keyPair.getRangeKey()));
}
}
}
}
return batchLoad(keys, config);
}
/**
* @param config never null
*/
private void processBatchGetRequest(
final Map<String, Class<?>> classesByTableName,
final Map<String, KeysAndAttributes> requestItems,
final Map<String, List<Object>> resultSet,
final DynamoDBMapperConfig config) {
BatchGetItemResult batchGetItemResult = null;
BatchGetItemRequest batchGetItemRequest = new BatchGetItemRequest()
.withRequestMetricCollector(config.getRequestMetricCollector());
batchGetItemRequest.setRequestItems(requestItems);
BatchLoadRetryStrategy batchLoadStrategy = config.getBatchLoadRetryStrategy();
BatchLoadContext batchLoadContext = new BatchLoadContext(batchGetItemRequest);
int retries = 0;
do {
if ( batchGetItemResult != null ) {
retries++;
batchLoadContext.setRetriesAttempted(retries);
if (!isNullOrEmpty(batchGetItemResult.getUnprocessedKeys())) {
pause(batchLoadStrategy.getDelayBeforeNextRetry(batchLoadContext));
batchGetItemRequest.setRequestItems(
batchGetItemResult.getUnprocessedKeys());
}
}
batchGetItemResult = db.batchGetItem(
applyBatchOperationUserAgent(batchGetItemRequest));
Map<String, List<Map<String, AttributeValue>>> responses = batchGetItemResult.getResponses();
for ( String tableName : responses.keySet() ) {
List<Object> objects = null;
if ( resultSet.get(tableName) != null ) {
objects = resultSet.get(tableName);
} else {
objects = new LinkedList<Object>();
}
Class<?> clazz = classesByTableName.get(tableName);
for ( Map<String, AttributeValue> item : responses.get(tableName) ) {
AttributeTransformer.Parameters<?> parameters =
toParameters(item, clazz, tableName, config);
objects.add(privateMarshallIntoObject(parameters));
}
resultSet.put(tableName, objects);
}
batchLoadContext.setBatchGetItemResult(batchGetItemResult);
// the number of unprocessed keys and Batch Load Strategy will drive the number of retries
} while ( batchLoadStrategy.shouldRetry(batchLoadContext) );
if (!isNullOrEmpty(batchGetItemResult.getUnprocessedKeys())) {
throw new BatchGetItemException(
"The BatchGetItemResult has unprocessed keys after max retry attempts. Catch the BatchGetItemException to get the list of unprocessed keys.",
batchGetItemResult.getUnprocessedKeys(), resultSet);
}
}
private static <K, V> boolean isNullOrEmpty(Map<K, V> map) {
return map == null || map.isEmpty();
}
/**
* Determines if any of the primary keys require auto-generation.
*/
private static <T> boolean anyKeyGeneratable(
final DynamoDBMapperTableModel<T> model,
final T object,
final SaveBehavior saveBehavior
) {
for (final DynamoDBMapperFieldModel<T,Object> field : model.keys()) {
if (canGenerate(model, object, saveBehavior, field)) {
return true;
}
}
return false;
}
/**
* Determines if the mapping value can be auto-generated.
*/
private static <T> boolean canGenerate(
final DynamoDBMapperTableModel<T> model,
final T object,
final SaveBehavior saveBehavior,
final DynamoDBMapperFieldModel<T,Object> field
) {
if (field.getGenerateStrategy() == null) {
return false;
} else if (field.getGenerateStrategy() == DynamoDBAutoGenerateStrategy.ALWAYS) {
return true;
} else if (field.get(object) != null) {
return false;
} else if (field.keyType() != null || field.indexed()) {
return true;
} else if (saveBehavior == SaveBehavior.CLOBBER) {
return true;
} else if (saveBehavior == SaveBehavior.UPDATE) {
return true;
} else if (anyKeyGeneratable(model, object, saveBehavior)) {
return true;
}
return false;
}
private final class ValueUpdate {
private final DynamoDBMapperFieldModel<Object,Object> field;
private final AttributeValue newValue;
private final Object target;
public ValueUpdate(
DynamoDBMapperFieldModel<Object,Object> field,
AttributeValue newValue,
Object target) {
this.field = field;
this.newValue = newValue;
this.target = target;
}
public void apply() {
field.set(target, field.unconvert(newValue));
}
}
@Override
public <T> PaginatedScanList<T> scan(Class<T> clazz,
DynamoDBScanExpression scanExpression,
DynamoDBMapperConfig config) {
config = mergeConfig(config);
ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config);
ScanResult scanResult = db.scan(applyUserAgent(scanRequest));
return new PaginatedScanList<T>(this, clazz, db, scanRequest, scanResult, config.getPaginationLoadingStrategy(), config);
}
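/**
* Scans a table in parallel across the given number of logical segments,
* returning a lazily-loaded list of results. A usage sketch (illustrative;
* four segments scanned by the internal worker threads):
*
* <pre class="brush: java">
* PaginatedParallelScanList<TestClass> results = mapper.parallelScan(
*         TestClass.class, new DynamoDBScanExpression(), 4);
* for (TestClass item : results) {
*     // process item
* }
* </pre>
*/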
@Override
public <T> PaginatedParallelScanList<T> parallelScan(Class<T> clazz,
DynamoDBScanExpression scanExpression,
int totalSegments,
DynamoDBMapperConfig config) {
config = mergeConfig(config);
// Create hard copies of the original scan request, each with a different segment number.
List<ScanRequest> parallelScanRequests = createParallelScanRequestsFromExpression(clazz, scanExpression, totalSegments, config);
ParallelScanTask parallelScanTask = new ParallelScanTask(db, parallelScanRequests);
return new PaginatedParallelScanList<T>(this, clazz, db, parallelScanTask, config.getPaginationLoadingStrategy(), config);
}
@Override
public <T> ScanResultPage<T> scanPage(Class<T> clazz,
DynamoDBScanExpression scanExpression,
DynamoDBMapperConfig config) {
config = mergeConfig(config);
ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config);
ScanResult scanResult = db.scan(applyUserAgent(scanRequest));
ScanResultPage<T> result = new ScanResultPage<T>();
List<AttributeTransformer.Parameters<T>> parameters =
toParameters(scanResult.getItems(), clazz, scanRequest.getTableName(), config);
result.setResults(marshallIntoObjects(parameters));
result.setLastEvaluatedKey(scanResult.getLastEvaluatedKey());
result.setCount(scanResult.getCount());
result.setScannedCount(scanResult.getScannedCount());
result.setConsumedCapacity(scanResult.getConsumedCapacity());
return result;
}
@Override
public <T> PaginatedQueryList<T> query(Class<T> clazz,
DynamoDBQueryExpression<T> queryExpression,
DynamoDBMapperConfig config) {
config = mergeConfig(config);
QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config);
QueryResult queryResult = db.query(applyUserAgent(queryRequest));
return new PaginatedQueryList<T>(this, clazz, db, queryRequest, queryResult, config.getPaginationLoadingStrategy(), config);
}
@Override
public <T> QueryResultPage<T> queryPage(Class<T> clazz,
DynamoDBQueryExpression<T> queryExpression,
DynamoDBMapperConfig config) {
config = mergeConfig(config);
QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config);
QueryResult queryResult = db.query(applyUserAgent(queryRequest));
QueryResultPage<T> result = new QueryResultPage<T>();
List<AttributeTransformer.Parameters<T>> parameters =
toParameters(queryResult.getItems(), clazz, queryRequest.getTableName(), config);
result.setResults(marshallIntoObjects(parameters));
result.setLastEvaluatedKey(queryResult.getLastEvaluatedKey());
result.setCount(queryResult.getCount());
result.setScannedCount(queryResult.getScannedCount());
result.setConsumedCapacity(queryResult.getConsumedCapacity());
return result;
}
@Override
public int count(Class<?> clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) {
config = mergeConfig(config);
ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config);
scanRequest.setSelect(Select.COUNT);
// Count scans can also be truncated for large datasets
int count = 0;
ScanResult scanResult = null;
do {
scanResult = db.scan(applyUserAgent(scanRequest));
count += scanResult.getCount();
scanRequest.setExclusiveStartKey(scanResult.getLastEvaluatedKey());
} while (scanResult.getLastEvaluatedKey() != null);
return count;
}
@Override
public <T> int count(Class<T> clazz, DynamoDBQueryExpression<T> queryExpression, DynamoDBMapperConfig config) {
config = mergeConfig(config);
QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config);
queryRequest.setSelect(Select.COUNT);
// Count queries can also be truncated for large datasets
int count = 0;
QueryResult queryResult = null;
do {
queryResult = db.query(applyUserAgent(queryRequest));
count += queryResult.getCount();
queryRequest.setExclusiveStartKey(queryResult.getLastEvaluatedKey());
} while (queryResult.getLastEvaluatedKey() != null);
return count;
}
/**
* @param config never null
*/
private ScanRequest createScanRequestFromExpression(Class<?> clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) {
ScanRequest scanRequest = new ScanRequest();
scanRequest.setTableName(getTableName(clazz, config));
scanRequest.setIndexName(scanExpression.getIndexName());
scanRequest.setScanFilter(scanExpression.getScanFilter());
scanRequest.setLimit(scanExpression.getLimit());
scanRequest.setExclusiveStartKey(scanExpression.getExclusiveStartKey());
scanRequest.setTotalSegments(scanExpression.getTotalSegments());
scanRequest.setSegment(scanExpression.getSegment());
scanRequest.setConditionalOperator(scanExpression.getConditionalOperator());
scanRequest.setFilterExpression(scanExpression.getFilterExpression());
scanRequest.setExpressionAttributeNames(scanExpression
.getExpressionAttributeNames());
scanRequest.setExpressionAttributeValues(scanExpression
.getExpressionAttributeValues());
scanRequest.setRequestMetricCollector(config.getRequestMetricCollector());
scanRequest.setSelect(scanExpression.getSelect());
scanRequest.setProjectionExpression(scanExpression.getProjectionExpression());
scanRequest.setReturnConsumedCapacity(scanExpression.getReturnConsumedCapacity());
scanRequest.setConsistentRead(scanExpression.isConsistentRead());
return applyUserAgent(scanRequest);
}
/**
* @param config never null
*/
private List<ScanRequest> createParallelScanRequestsFromExpression(Class<?> clazz, DynamoDBScanExpression scanExpression, int totalSegments, DynamoDBMapperConfig config) {
if (totalSegments < 1) {
throw new IllegalArgumentException("Parallel scan should have at least one scan segment.");
}
if (scanExpression.getExclusiveStartKey() != null) {
log.info("The ExclusiveStartKey parameter specified in the DynamoDBScanExpression is ignored,"
+ " since the individual parallel scan request on each segment is applied on a separate key scope.");
}
if (scanExpression.getSegment() != null || scanExpression.getTotalSegments() != null) {
log.info("The Segment and TotalSegments parameters specified in the DynamoDBScanExpression are ignored.");
}
List<ScanRequest> parallelScanRequests = new LinkedList<ScanRequest>();
for (int segment = 0; segment < totalSegments; segment++) {
ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config);
parallelScanRequests.add(scanRequest
.withSegment(segment).withTotalSegments(totalSegments)
.withExclusiveStartKey(null));
}
return parallelScanRequests;
}
protected <T> QueryRequest createQueryRequestFromExpression(Class<T> clazz,
DynamoDBQueryExpression<T> xpress, DynamoDBMapperConfig config) {
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, config);
QueryRequest req = new QueryRequest();
req.setConsistentRead(xpress.isConsistentRead());
req.setTableName(getTableName(clazz, xpress.getHashKeyValues(), config));
req.setIndexName(xpress.getIndexName());
req.setKeyConditionExpression(xpress.getKeyConditionExpression());
processKeyConditions(req, xpress, model);
req.withScanIndexForward(xpress.isScanIndexForward())
.withLimit(xpress.getLimit())
.withExclusiveStartKey(xpress.getExclusiveStartKey())
.withQueryFilter(xpress.getQueryFilter())
.withConditionalOperator(xpress.getConditionalOperator())
.withSelect(xpress.getSelect())
.withProjectionExpression(xpress.getProjectionExpression())
.withFilterExpression(xpress.getFilterExpression())
.withExpressionAttributeNames(xpress.getExpressionAttributeNames())
.withExpressionAttributeValues(xpress.getExpressionAttributeValues())
.withReturnConsumedCapacity(xpress.getReturnConsumedCapacity())
.withRequestMetricCollector(config.getRequestMetricCollector())
;
return applyUserAgent(req);
}
/**
* Utility method for checking the validity of both hash and range key
* conditions. It also tries to infer the correct index name from the POJO
* annotation, if such information is not directly specified by the user.
*
* @param queryRequest
* The QueryRequest object to be sent to the service.
* @param expression
* The query expression specified by the user. All the hash key
* EQ conditions are extracted from the POJO it carries. The mapper
* will choose one of them that can be applied together with the
* user-specified (if any) index name and range key conditions, or
* throw an error if more than one condition is applicable for the
* query. We currently only allow at most one range key condition.
* @param model
* The table model of the domain class of the queried items.
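* <p>
* For example (an illustrative sketch; assumes an attribute "status"
* annotated as the hash key of a GSI named "status-index"):
*
* <pre class="brush: java">
* TestClass hashKeyValues = new TestClass();
* hashKeyValues.setStatus("OPEN");
* DynamoDBQueryExpression<TestClass> query = new DynamoDBQueryExpression<TestClass>()
*         .withHashKeyValues(hashKeyValues);
* // "status-index" is inferred from the annotations.
* mapper.query(TestClass.class, query);
* </pre>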
*/
private static <T> void processKeyConditions(
final QueryRequest queryRequest,
final DynamoDBQueryExpression<T> expression,
final DynamoDBMapperTableModel<T> model
) {
// Hash key (primary or index) condition
final Map<String, Condition> hashKeyConditions = new LinkedHashMap<String, Condition>();
if (expression.getHashKeyValues() != null) {
for (final DynamoDBMapperFieldModel<T,Object> field : model.fields()) {
if (field.keyType() == HASH || !field.globalSecondaryIndexNames(HASH).isEmpty()) {
final Object value = field.get(expression.getHashKeyValues());
if (value != null) {
hashKeyConditions.put(field.name(), field.eq(value));
}
}
}
}
// Range key (primary or index) conditions
final Map<String, Condition> rangeKeyConditions = expression.getRangeKeyConditions();
// There should be at least one hash key condition.
final String keyCondExpression = queryRequest.getKeyConditionExpression();
if (keyCondExpression == null) {
if (isNullOrEmpty(hashKeyConditions)) {
throw new IllegalArgumentException(
"Illegal query expression: No hash key condition is found in the query");
}
} else {
if (!isNullOrEmpty(hashKeyConditions)) {
throw new IllegalArgumentException(
"Illegal query expression: Either the hash key conditions or the key condition expression must be specified but not both.");
}
if (!isNullOrEmpty(rangeKeyConditions)) {
throw new IllegalArgumentException(
"Illegal query expression: The range key conditions can only be specified when the key condition expression is not specified.");
}
// key condition expression is in use
return;
}
// We don't allow multiple range key conditions.
if (rangeKeyConditions != null && rangeKeyConditions.size() > 1) {
throw new IllegalArgumentException(
"Illegal query expression: Conditions on multiple range keys ("
+ rangeKeyConditions.keySet().toString()
+ ") are found in the query. DynamoDB service only accepts up to ONE range key condition.");
}
final boolean hasRangeKeyCondition = (rangeKeyConditions != null)
&& (!rangeKeyConditions.isEmpty());
final String userProvidedIndexName = queryRequest.getIndexName();
final String primaryHashKeyName = model.hashKey().name();
// First collect the names of all the global/local secondary indexes that could be applied to this query.
// If the user explicitly specified an index name, we also need to
// 1) check the index is applicable for both hash and range key conditions
// 2) choose one hash key condition if there are more than one of them
boolean hasPrimaryHashKeyCondition = false;
final Map<String, Set<String>> annotatedGSIsOnHashKeys = new HashMap<String, Set<String>>();
String hashKeyNameForThisQuery = null;
boolean hasPrimaryRangeKeyCondition = false;
final Set<String> annotatedLSIsOnRangeKey = new HashSet<String>();
final Set<String> annotatedGSIsOnRangeKey = new HashSet<String>();
// Range key condition
String rangeKeyNameForThisQuery = null;
if (hasRangeKeyCondition) {
for (String rangeKeyName : rangeKeyConditions.keySet()) {
rangeKeyNameForThisQuery = rangeKeyName;
final DynamoDBMapperFieldModel<T,Object> rk = model.field(rangeKeyName);
if (rk.keyType() == RANGE) {
hasPrimaryRangeKeyCondition = true;
}
annotatedLSIsOnRangeKey.addAll(rk.localSecondaryIndexNames());
annotatedGSIsOnRangeKey.addAll(rk.globalSecondaryIndexNames(RANGE));
}
if ( !hasPrimaryRangeKeyCondition
&& annotatedLSIsOnRangeKey.isEmpty()
&& annotatedGSIsOnRangeKey.isEmpty()) {
throw new DynamoDBMappingException(
"The query contains a condition on a range key (" +
rangeKeyNameForThisQuery + ") " +
"that is not annotated with either @DynamoDBRangeKey or @DynamoDBIndexRangeKey.");
}
}
final boolean userProvidedLSIWithRangeKeyCondition = (userProvidedIndexName != null)
&& (annotatedLSIsOnRangeKey.contains(userProvidedIndexName));
final boolean hashOnlyLSIQuery = (userProvidedIndexName != null)
&& ( !hasRangeKeyCondition )
&& model.localSecondaryIndex(userProvidedIndexName) != null;
final boolean userProvidedLSI = userProvidedLSIWithRangeKeyCondition || hashOnlyLSIQuery;
final boolean userProvidedGSIWithRangeKeyCondition = (userProvidedIndexName != null)
&& (annotatedGSIsOnRangeKey.contains(userProvidedIndexName));
final boolean hashOnlyGSIQuery = (userProvidedIndexName != null)
&& ( !hasRangeKeyCondition )
&& model.globalSecondaryIndex(userProvidedIndexName) != null;
final boolean userProvidedGSI = userProvidedGSIWithRangeKeyCondition || hashOnlyGSIQuery;
if (userProvidedLSI && userProvidedGSI ) {
throw new DynamoDBMappingException(
"Invalid query: " +
"Index \"" + userProvidedIndexName + "\" " +
"is annotated as both an LSI and a GSI for this attribute.");
}
// Hash key conditions
for (String hashKeyName : hashKeyConditions.keySet()) {
if (hashKeyName.equals(primaryHashKeyName)) {
hasPrimaryHashKeyCondition = true;
}
final DynamoDBMapperFieldModel<T,Object> hk = model.field(hashKeyName);
Collection<String> annotatedGSINames = hk.globalSecondaryIndexNames(HASH);
annotatedGSIsOnHashKeys.put(hashKeyName,
annotatedGSINames == null ? new HashSet<String>() : new HashSet<String>(annotatedGSINames));
// Additional validation if the user provided an index name.
if (userProvidedIndexName != null) {
boolean foundHashKeyConditionValidWithUserProvidedIndex = false;
if (userProvidedLSI && hashKeyName.equals(primaryHashKeyName)) {
// found an applicable hash key condition (primary hash + LSI range)
foundHashKeyConditionValidWithUserProvidedIndex = true;
} else if (userProvidedGSI &&
annotatedGSINames != null && annotatedGSINames.contains(userProvidedIndexName)) {
// found an applicable hash key condition (GSI hash + range)
foundHashKeyConditionValidWithUserProvidedIndex = true;
}
if (foundHashKeyConditionValidWithUserProvidedIndex) {
if ( hashKeyNameForThisQuery != null ) {
throw new IllegalArgumentException(
"Ambiguous query expression: More than one hash key EQ conditions (" +
hashKeyNameForThisQuery + ", " + hashKeyName +
") are applicable to the specified index ("
+ userProvidedIndexName + "). " +
"Please provide only one of them in the query expression.");
} else {
// found an applicable hash key condition
hashKeyNameForThisQuery = hashKeyName;
}
}
}
}
// Collate all the key conditions
Map<String, Condition> keyConditions = new HashMap<String, Condition>();
// With user-provided index name
if (userProvidedIndexName != null) {
if (hasRangeKeyCondition
&& ( !userProvidedLSI )
&& ( !userProvidedGSI )) {
throw new IllegalArgumentException(
"Illegal query expression: No range key condition is applicable to the specified index ("
+ userProvidedIndexName + "). ");
}
if (hashKeyNameForThisQuery == null) {
throw new IllegalArgumentException(
"Illegal query expression: No hash key condition is applicable to the specified index ("
+ userProvidedIndexName + "). ");
}
keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery));
if (hasRangeKeyCondition) {
keyConditions.putAll(rangeKeyConditions);
}
}
// Infer the index name by finding the index shared by both hash and range key annotations.
else {
if (hasRangeKeyCondition) {
String inferredIndexName = null;
hashKeyNameForThisQuery = null;
if (hasPrimaryHashKeyCondition && hasPrimaryRangeKeyCondition) {
// Found valid query: primary hash + range key conditions
hashKeyNameForThisQuery = primaryHashKeyName;
} else {
// Intersect the set of all the indexes applicable to the range key
// with the set of indexes applicable to each hash key condition.
for (String hashKeyName : annotatedGSIsOnHashKeys.keySet()) {
boolean foundValidQueryExpressionWithInferredIndex = false;
String indexNameInferredByThisHashKey = null;
if (hashKeyName.equals(primaryHashKeyName)) {
if (annotatedLSIsOnRangeKey.size() == 1) {
// Found valid query (Primary hash + LSI range conditions)
foundValidQueryExpressionWithInferredIndex = true;
indexNameInferredByThisHashKey = annotatedLSIsOnRangeKey.iterator().next();
}
}
Set<String> annotatedGSIsOnHashKey = annotatedGSIsOnHashKeys.get(hashKeyName);
// We don't need the data in annotatedGSIsOnHashKeys afterwards,
// so it's safe to do the intersection in-place.
annotatedGSIsOnHashKey.retainAll(annotatedGSIsOnRangeKey);
if (annotatedGSIsOnHashKey.size() == 1) {
// Found valid query (Hash + range conditions on a GSI)
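// If an LSI-based query was already inferred for this (primary) hash key, stash that
// candidate into hashKeyNameForThisQuery/inferredIndexName so the ambiguity check
// below reports both the LSI and the GSI candidates.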
if (foundValidQueryExpressionWithInferredIndex) {
hashKeyNameForThisQuery = hashKeyName;
inferredIndexName = indexNameInferredByThisHashKey;
}
foundValidQueryExpressionWithInferredIndex = true;
indexNameInferredByThisHashKey = annotatedGSIsOnHashKey.iterator().next();
}
if (foundValidQueryExpressionWithInferredIndex) {
if (hashKeyNameForThisQuery != null) {
throw new IllegalArgumentException(
"Ambiguous query expression: Found multiple valid queries: " +
"(Hash: \"" + hashKeyNameForThisQuery + "\", Range: \"" + rangeKeyNameForThisQuery + "\", Index: \"" + inferredIndexName + "\") and " +
"(Hash: \"" + hashKeyName + "\", Range: \"" + rangeKeyNameForThisQuery + "\", Index: \"" + indexNameInferredByThisHashKey + "\").");
} else {
hashKeyNameForThisQuery = hashKeyName;
inferredIndexName = indexNameInferredByThisHashKey;
}
}
}
}
if (hashKeyNameForThisQuery != null) {
keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery));
keyConditions.putAll(rangeKeyConditions);
queryRequest.setIndexName(inferredIndexName);
} else {
throw new IllegalArgumentException(
"Illegal query expression: Cannot infer the index name from the query expression.");
}
} else {
// No range key condition is specified.
if (hashKeyConditions.size() > 1) {
if ( hasPrimaryHashKeyCondition ) {
keyConditions.put(primaryHashKeyName, hashKeyConditions.get(primaryHashKeyName));
} else {
throw new IllegalArgumentException(
"Ambiguous query expression: More than one index hash key EQ conditions (" +
hashKeyConditions.keySet() +
") are applicable to the query. " +
"Please provide only one of them in the query expression, or specify the appropriate index name.");
}
} else {
// Only one hash key condition
String hashKeyName = annotatedGSIsOnHashKeys.keySet().iterator().next();
if ( !hasPrimaryHashKeyCondition ) {
if (annotatedGSIsOnHashKeys.get(hashKeyName).size() == 1) {
// Set the index if the index hash key is only annotated with one GSI.
queryRequest.setIndexName(annotatedGSIsOnHashKeys.get(hashKeyName).iterator().next());
} else if (annotatedGSIsOnHashKeys.get(hashKeyName).size() > 1) {
throw new IllegalArgumentException(
"Ambiguous query expression: More than one GSIs (" +
annotatedGSIsOnHashKeys.get(hashKeyName) +
") are applicable to the query. " +
"Please specify one of them in your query expression.");
} else {
throw new IllegalArgumentException(
"Illegal query expression: No GSI is found in the @DynamoDBIndexHashKey annotation for attribute " +
"\"" + hashKeyName + "\".");
}
}
keyConditions.putAll(hashKeyConditions);
}
}
}
queryRequest.setKeyConditions(keyConditions);
}
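// Editor's illustrative sketch (not part of the original source; the class and index
// names are hypothetical): given a model whose "status" attribute is annotated with
// @DynamoDBIndexHashKey(globalSecondaryIndexName = "status-date-index") and whose
// "date" attribute carries the matching @DynamoDBIndexRangeKey, a query with an EQ
// condition on "status" and a range condition on "date" lets the logic above infer
// "status-date-index" automatically, with no explicit setIndexName(...) call needed.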
private <T> AttributeTransformer.Parameters<T> toParameters(
final Map<String, AttributeValue> attributeValues,
final Class<T> modelClass,
final String tableName,
final DynamoDBMapperConfig mapperConfig) {
return toParameters(attributeValues, false, modelClass, tableName, mapperConfig);
}
private <T> AttributeTransformer.Parameters<T> toParameters(
final Map<String, AttributeValue> attributeValues,
final boolean partialUpdate,
final Class<T> modelClass,
final String tableName,
final DynamoDBMapperConfig mapperConfig) {
return new TransformerParameters<T>(
getTableModel(modelClass, mapperConfig),
attributeValues,
partialUpdate,
modelClass,
mapperConfig,
tableName);
}
final <T> List<AttributeTransformer.Parameters<T>> toParameters(
final List<Map<String, AttributeValue>> attributeValues,
final Class<T> modelClass,
final String tableName,
final DynamoDBMapperConfig mapperConfig
) {
if (attributeValues == null) {
return Collections.emptyList();
}
List<AttributeTransformer.Parameters<T>> rval =
new ArrayList<AttributeTransformer.Parameters<T>>(attributeValues.size());
for (Map<String, AttributeValue> item : attributeValues) {
rval.add(toParameters(item, modelClass, tableName, mapperConfig));
}
return rval;
}
/**
* The one true implementation of AttributeTransformer.Parameters.
*/
private static class TransformerParameters<T>
implements AttributeTransformer.Parameters<T> {
private final DynamoDBMapperTableModel<T> model;
private final Map<String, AttributeValue> attributeValues;
private final boolean partialUpdate;
private final Class<T> modelClass;
private final DynamoDBMapperConfig mapperConfig;
private final String tableName;
public TransformerParameters(
final DynamoDBMapperTableModel<T> model,
final Map<String, AttributeValue> attributeValues,
final boolean partialUpdate,
final Class<T> modelClass,
final DynamoDBMapperConfig mapperConfig,
final String tableName) {
this.model = model;
this.attributeValues =
Collections.unmodifiableMap(attributeValues);
this.partialUpdate = partialUpdate;
this.modelClass = modelClass;
this.mapperConfig = mapperConfig;
this.tableName = tableName;
}
@Override
public Map<String, AttributeValue> getAttributeValues() {
return attributeValues;
}
@Override
public boolean isPartialUpdate() {
return partialUpdate;
}
@Override
public Class<T> getModelClass() {
return modelClass;
}
@Override
public DynamoDBMapperConfig getMapperConfig() {
return mapperConfig;
}
@Override
public String getTableName() {
return tableName;
}
@Override
public String getHashKeyName() {
return model.hashKey().name();
}
@Override
public String getRangeKeyName() {
return model.rangeKeyIfExists() == null ? null : model.rangeKey().name();
}
}
private Map<String, AttributeValue> untransformAttributes(
final AttributeTransformer.Parameters<?> parameters
) {
if (transformer != null) {
return transformer.untransform(parameters);
} else {
return parameters.getAttributeValues();
}
}
private Map<String, AttributeValue> transformAttributes(
final AttributeTransformer.Parameters<?> parameters) {
if (transformer != null) {
return transformer.transform(parameters);
} else {
return parameters.getAttributeValues();
}
}
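// Editor's sketch (not in the original source; assumes only the AttributeTransformer
// contract used above): a minimal pass-through transformer returns the attribute
// values unchanged. transform(...) runs on the save path and untransform(...) on the
// load path, exactly as the two helpers above dispatch.
//
//     AttributeTransformer noop = new AttributeTransformer() {
//         @Override
//         public Map<String, AttributeValue> transform(
//                 AttributeTransformer.Parameters<?> parameters) {
//             return parameters.getAttributeValues();
//         }
//         @Override
//         public Map<String, AttributeValue> untransform(
//                 AttributeTransformer.Parameters<?> parameters) {
//             return parameters.getAttributeValues();
//         }
//     };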
/**
* Returns a new map object that merges the two sets of expected value
* conditions (user-specified or imposed by the internal implementation of
* DynamoDBMapper). An internal assertion on an attribute is overridden by
* any user-specified condition on the same attribute.
*
* An exception is thrown if the two sets of conditions cannot be combined.
*/
private static Map<String, ExpectedAttributeValue> mergeExpectedAttributeValueConditions(
Map<String, ExpectedAttributeValue> internalAssertions,
Map<String, ExpectedAttributeValue> userProvidedConditions,
String userProvidedConditionOperator) {
// If either condition map is null, simply return a copy of the other one.
if ((internalAssertions == null || internalAssertions.isEmpty())
&& (userProvidedConditions == null || userProvidedConditions.isEmpty())) {
return null;
} else if (internalAssertions == null) {
return new HashMap<String, ExpectedAttributeValue>(userProvidedConditions);
} else if (userProvidedConditions == null) {
return new HashMap<String, ExpectedAttributeValue>(internalAssertions);
}
// Start from a copy of the internal conditions.
Map<String, ExpectedAttributeValue> mergedExpectedValues =
new HashMap<String, ExpectedAttributeValue>(internalAssertions);
// Remove internal conditions that are going to be overlaid by user-provided ones.
for (String attrName : userProvidedConditions.keySet()) {
mergedExpectedValues.remove(attrName);
}
// All the generated internal conditions must be joined by AND.
// Throw an exception if the user specifies an OR operator and the
// internal conditions are not completely overridden by the
// user-provided ones.
if ( ConditionalOperator.OR.toString().equals(userProvidedConditionOperator)
&& !mergedExpectedValues.isEmpty() ) {
throw new IllegalArgumentException("Unable to assert the value of the fields "
+ mergedExpectedValues.keySet() + ", since the expected value conditions cannot be combined "
+ "with user-specified conditions joined by \"OR\". You can use SaveBehavior.CLOBBER to "
+ "skip the assertion on these fields.");
}
mergedExpectedValues.putAll(userProvidedConditions);
return mergedExpectedValues;
}
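// Worked example (editor's addition): suppose the internal assertions are
// { "version" -> expected value 1 } (as generated for a @DynamoDBVersionAttribute
// field) and the user supplies { "price" -> LT(100) }. Joined by AND (the default),
// the merged map contains both entries, with user conditions winning on any shared
// attribute name. Joined by OR, the leftover internal "version" assertion cannot be
// combined, and the method above throws IllegalArgumentException.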
static <X extends AmazonWebServiceRequest> X applyUserAgent(X request) {
request.getRequestClientOptions().appendUserAgent(USER_AGENT);
return request;
}
static <X extends AmazonWebServiceRequest> X applyBatchOperationUserAgent(X request) {
request.getRequestClientOptions().appendUserAgent(USER_AGENT_BATCH_OPERATION);
return request;
}
@Override
public S3ClientCache getS3ClientCache() {
return s3Links.getS3ClientCache();
}
@Override
public S3Link createS3Link(Region s3region, String bucketName, String key) {
return s3Links.createS3Link(s3region, bucketName, key);
}
@Override
public S3Link createS3Link(String s3region, String bucketName, String key) {
return s3Links.createS3Link(s3region, bucketName, key);
}
@Override
public <T> CreateTableRequest generateCreateTableRequest(Class<T> clazz, DynamoDBMapperConfig config) {
config = mergeConfig(config);
final DynamoDBMapperTableModel<T> model = getTableModel(clazz, config);
final CreateTableRequest request = new CreateTableRequest();
request.setTableName(getTableName(clazz, config));
request.withKeySchema(new KeySchemaElement(model.hashKey().name(), HASH));
if (model.rangeKeyIfExists() != null) {
request.withKeySchema(new KeySchemaElement(model.rangeKey().name(), RANGE));
}
request.setGlobalSecondaryIndexes(model.globalSecondaryIndexes());
request.setLocalSecondaryIndexes(model.localSecondaryIndexes());
for (final DynamoDBMapperFieldModel<T, Object> field : model.fields()) {
if (field.keyType() != null || field.indexed()) {
request.withAttributeDefinitions(new AttributeDefinition()
.withAttributeType(ScalarAttributeType.valueOf(field.attributeType().name()))
.withAttributeName(field.name())
);
}
}
return request;
}
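// Hedged usage sketch (editor's addition; "MyItem" is a hypothetical annotated
// class): the generated request carries no ProvisionedThroughput, so callers
// typically set one before creating the table:
//
//     CreateTableRequest request = mapper.generateCreateTableRequest(MyItem.class);
//     request.setProvisionedThroughput(new ProvisionedThroughput(5L, 5L));
//     dynamoDBClient.createTable(request);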
@Override
public <T> DeleteTableRequest generateDeleteTableRequest(Class<T> clazz, DynamoDBMapperConfig config) {
config = mergeConfig(config);
DeleteTableRequest deleteTableRequest = new DeleteTableRequest();
deleteTableRequest.setTableName(getTableName(clazz, config));
return deleteTableRequest;
}
/**
* Creates a new table mapper using this mapper to perform operations.
* @param <T> The object type on which this mapper operates.
* @param <H> The hash key value type.
* @param <R> The range key value type; use <code>?</code> if no range key.
* @param clazz The object class.
* @return The table mapper.
*/
public <T, H, R> DynamoDBTableMapper<T, H, R> newTableMapper(Class<T> clazz) {
DynamoDBMapperConfig config = mergeConfig(null);
return new DynamoDBTableMapper<T, H, R>(this.db, this, config, getTableModel(clazz, config));
}
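// Illustrative usage (editor's addition; "MyItem" is hypothetical, with a String
// hash key and no range key):
//
//     DynamoDBTableMapper<MyItem, String, ?> table = mapper.newTableMapper(MyItem.class);
//     table.save(item);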
/**
* The return type of batchWrite, batchDelete and batchSave.
*
* It contains the unprocessed items and the exception that caused the
* failure.
*/
public static class FailedBatch {
private Map<String, List<WriteRequest>> unprocessedItems;
private Exception exception;
public void setUnprocessedItems(Map<String, List<WriteRequest>> unprocessedItems) {
this.unprocessedItems = unprocessedItems;
}
public Map<String, List<WriteRequest>>