All Downloads are FREE. Search and download functionalities are using the official Maven repository.

com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapper Maven / Gradle / Ivy

Go to download

The AWS SDK for Java with support for OSGi. The AWS SDK for Java provides Java APIs for building software on AWS' cost-effective, scalable, and reliable infrastructure products. The AWS Java SDK allows developers to code against APIs for all of Amazon's infrastructure web services (Amazon S3, Amazon EC2, Amazon SQS, Amazon Relational Database Service, Amazon AutoScaling, etc).

There is a newer version: 1.11.60
Show newest version
/*
 * Copyright 2015-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.services.dynamodbv2.datamodeling;

import static com.amazonaws.services.dynamodbv2.model.KeyType.HASH;
import static com.amazonaws.services.dynamodbv2.model.KeyType.RANGE;

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.retry.RetryUtils;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.BatchLoadRetryStrategy;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.BatchWriteRetryStrategy;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.ConsistentReads;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.SaveBehavior;
import com.amazonaws.services.dynamodbv2.model.AttributeAction;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemResult;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult;
import com.amazonaws.services.dynamodbv2.model.ComparisonOperator;
import com.amazonaws.services.dynamodbv2.model.Condition;
import com.amazonaws.services.dynamodbv2.model.ConditionalCheckFailedException;
import com.amazonaws.services.dynamodbv2.model.ConditionalOperator;
import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteItemRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest;
import com.amazonaws.services.dynamodbv2.model.ExpectedAttributeValue;
import com.amazonaws.services.dynamodbv2.model.GetItemRequest;
import com.amazonaws.services.dynamodbv2.model.GetItemResult;
import com.amazonaws.services.dynamodbv2.model.GlobalSecondaryIndex;
import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;
import com.amazonaws.services.dynamodbv2.model.KeysAndAttributes;
import com.amazonaws.services.dynamodbv2.model.LocalSecondaryIndex;
import com.amazonaws.services.dynamodbv2.model.PutItemRequest;
import com.amazonaws.services.dynamodbv2.model.PutItemResult;
import com.amazonaws.services.dynamodbv2.model.PutRequest;
import com.amazonaws.services.dynamodbv2.model.QueryRequest;
import com.amazonaws.services.dynamodbv2.model.QueryResult;
import com.amazonaws.services.dynamodbv2.model.ReturnValue;
import com.amazonaws.services.dynamodbv2.model.ScanRequest;
import com.amazonaws.services.dynamodbv2.model.ScanResult;
import com.amazonaws.services.dynamodbv2.model.Select;
import com.amazonaws.services.dynamodbv2.model.UpdateItemRequest;
import com.amazonaws.services.dynamodbv2.model.UpdateItemResult;
import com.amazonaws.services.dynamodbv2.model.WriteRequest;
import com.amazonaws.services.s3.model.Region;
import com.amazonaws.util.VersionInfoUtils;

/**
 * Object mapper for domain-object interaction with DynamoDB.
 * 

* To use, define a domain class that represents an item in a DynamoDB table and * annotate it with the annotations found in the * com.amazonaws.services.dynamodbv2.datamodeling package. In order to allow the * mapper to correctly persist the data, each modeled property in the domain * class should be accessible via getter and setter methods, and each property * annotation should be either applied to the getter method or the class field. * A minimal example using getter annotations: * *

 * @DynamoDBTable(tableName = "TestTable")
 * public class TestClass {
 *
 *     private Long key;
 *     private double rangeKey;
 *     private Long version;
 *
 *     private Set<Integer> integerSetAttribute;
 *
 *     @DynamoDBHashKey
 *     public Long getKey() {
 *         return key;
 *     }
 *
 *     public void setKey(Long key) {
 *         this.key = key;
 *     }
 *
 *     @DynamoDBRangeKey
 *     public double getRangeKey() {
 *         return rangeKey;
 *     }
 *
 *     public void setRangeKey(double rangeKey) {
 *         this.rangeKey = rangeKey;
 *     }
 *
 *     @DynamoDBAttribute(attributeName = "integerSetAttribute")
 *     public Set<Integer> getIntegerAttribute() {
 *         return integerSetAttribute;
 *     }
 *
 *     public void setIntegerAttribute(Set<Integer> integerAttribute) {
 *         this.integerSetAttribute = integerAttribute;
 *     }
 *
 *     @DynamoDBVersionAttribute
 *     public Long getVersion() {
 *         return version;
 *     }
 *
 *     public void setVersion(Long version) {
 *         this.version = version;
 *     }
 * }
 * 
*

* Save instances of annotated classes to DynamoDB, retrieve them, and delete * them using the {@link DynamoDBMapper} class, as in the following example. * *

 * DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
 * Long hashKey = 105L;
 * double rangeKey = 1.0d;
 * TestClass obj = mapper.load(TestClass.class, hashKey, rangeKey);
 * obj.getIntegerAttribute().add(42);
 * mapper.save(obj);
 * mapper.delete(obj);
 * 
*

* If you don't have your DynamoDB table set up yet, you can use * {@link DynamoDBMapper#generateCreateTableRequest(Class)} to construct the * {@link CreateTableRequest} for the table represented by your annotated class. * *

 * AmazonDynamoDB dynamoDBClient = new AmazonDynamoDBClient();
 * DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
 * CreateTableRequest req = mapper.generateCreateTableRequest(TestClass.class);
 * // Table provision throughput is still required since it cannot be specified in your POJO
 * req.setProvisionedThroughput(new ProvisionedThroughput(5L, 5L));
 * // Fire off the CreateTableRequest using the low-level client
 * dynamoDBClient.createTable(req);
 * 
*

* When using the save, load, and delete methods, {@link DynamoDBMapper} will * throw {@link DynamoDBMappingException}s to indicate that domain classes are * incorrectly annotated or otherwise incompatible with this class. Service * exceptions will always be propagated as {@link AmazonClientException}, and * DynamoDB-specific subclasses such as {@link ConditionalCheckFailedException} * will be used when possible. *

* This class is thread-safe and can be shared between threads. It's also very * lightweight, so it doesn't need to be. * * @see DynamoDBTable * @see DynamoDBHashKey * @see DynamoDBRangeKey * @see DynamoDBAutoGeneratedKey * @see DynamoDBAttribute * @see DynamoDBVersionAttribute * @see DynamoDBIgnore * @see DynamoDBMarshalling * @see DynamoDBMapperConfig */ public class DynamoDBMapper extends AbstractDynamoDBMapper { private final S3ClientCache s3cc; private final AmazonDynamoDB db; private final DynamoDBMapperConfig config; private final DynamoDBMapperModelFactory.Factory models; private final AttributeTransformer transformer; /** * The max back off time for batch get. The configuration for batch write * has been moved to DynamoDBMapperConfig */ static final long MAX_BACKOFF_IN_MILLISECONDS = 1000 * 3; /** The max number of items allowed in a BatchWrite request */ static final int MAX_ITEMS_PER_BATCH = 25; /** * This retry count is applicable only when every batch get item request * results in no data retrieved from server and the un processed keys is * same as request items */ static final int BATCH_GET_MAX_RETRY_COUNT_ALL_KEYS = 5; /** * User agent for requests made using the {@link DynamoDBMapper}. */ private static final String USER_AGENT = DynamoDBMapper.class.getName() + "/" + VersionInfoUtils.getVersion(); private static final String USER_AGENT_BATCH_OPERATION = DynamoDBMapper.class.getName() + "_batch_operation/" + VersionInfoUtils.getVersion(); private static final String NO_RANGE_KEY = new String(); private static final Log log = LogFactory.getLog(DynamoDBMapper.class); /** * Fail fast when trying to create a subclass of the DynamoDBMapper that * attempts to override one of the old {@code transformAttributes} methods. */ private static void failFastOnIncompatibleSubclass(Class clazz) { while (clazz != DynamoDBMapper.class) { Class[] classOverride = new Class[] { Class.class, Map.class }; Class[] nameOverride = new Class[] { String.class, String.class, Map.class }; for (Method method : clazz.getDeclaredMethods()) { if (method.getName().equals("transformAttributes")) { Class[] params = method.getParameterTypes(); if (Arrays.equals(params, classOverride) || Arrays.equals(params, nameOverride)) { throw new IllegalStateException( "The deprecated transformAttributes method is " + "no longer supported as of 1.9.0. Use an " + "AttributeTransformer to inject custom " + "attribute transformation logic."); } } } clazz = clazz.getSuperclass(); } } /** * Constructs a new mapper with the service object given, using the default * configuration. * * @param dynamoDB * The service object to use for all service calls. * @see DynamoDBMapperConfig#DEFAULT */ public DynamoDBMapper(final AmazonDynamoDB dynamoDB) { this(dynamoDB, DynamoDBMapperConfig.DEFAULT, null, null); } /** * Constructs a new mapper with the service object and configuration given. * * @param dynamoDB * The service object to use for all service calls. * @param config * The default configuration to use for all service calls. It can * be overridden on a per-operation basis. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config) { this(dynamoDB, config, null, null); } /** * Constructs a new mapper with the service object and S3 client cache * given, using the default configuration. * * @param ddb * The service object to use for all service calls. * @param s3CredentialProvider * The credentials provider for accessing S3. * Relevant only if {@link S3Link} is involved. * @see DynamoDBMapperConfig#DEFAULT */ public DynamoDBMapper( final AmazonDynamoDB ddb, final AWSCredentialsProvider s3CredentialProvider) { this(ddb, DynamoDBMapperConfig.DEFAULT, s3CredentialProvider); } /** * Constructs a new mapper with the given service object, configuration, * and transform hook. * * @param dynamoDB * the service object to use for all service calls * @param config * the default configuration to use for all service calls. It * can be overridden on a per-operation basis * @param transformer * The custom attribute transformer to invoke when serializing or * deserializing an object. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config, final AttributeTransformer transformer) { this(dynamoDB, config, transformer, null); } /** * Constructs a new mapper with the service object, configuration, and S3 * client cache given. * * @param dynamoDB * The service object to use for all service calls. * @param config * The default configuration to use for all service calls. It can * be overridden on a per-operation basis. * @param s3CredentialProvider * The credentials provider for accessing S3. * Relevant only if {@link S3Link} is involved. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config, final AWSCredentialsProvider s3CredentialProvider) { this(dynamoDB, config, null, validate(s3CredentialProvider)); } /** * Throws an exception if the given credentials provider is {@code null}. */ private static AWSCredentialsProvider validate( final AWSCredentialsProvider provider) { if (provider == null) { throw new IllegalArgumentException( "s3 credentials provider must not be null"); } return provider; } /** * Constructor with all parameters. * * @param dynamoDB * The service object to use for all service calls. * @param config * The default configuration to use for all service calls. It can * be overridden on a per-operation basis. * @param transformer * The custom attribute transformer to invoke when serializing or * deserializing an object. * @param s3CredentialProvider * The credentials provider for accessing S3. * Relevant only if {@link S3Link} is involved. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config, final AttributeTransformer transformer, final AWSCredentialsProvider s3CredentialsProvider) { failFastOnIncompatibleSubclass(getClass()); this.db = dynamoDB; this.config = config; this.transformer = transformer; if (s3CredentialsProvider == null) { this.s3cc = null; } else { this.s3cc = new S3ClientCache(s3CredentialsProvider); } this.models = StandardModelFactories.of(new ConversionSchema.Dependencies() .with(S3ClientCache.class, this.s3cc)); } private DynamoDBMapperTableModel getTableModel(Class clazz, DynamoDBMapperConfig config) { return this.models.getModelFactory(config).getTableModel(clazz); } @Override public T load(Class clazz, Object hashKey, DynamoDBMapperConfig config) { return load(clazz, hashKey, null, config); } @Override public T load(Class clazz, Object hashKey) { return load(clazz, hashKey, null, config); } @Override public T load(Class clazz, Object hashKey, Object rangeKey) { return load(clazz, hashKey, rangeKey, config); } @Override public T load(T keyObject) { return load(keyObject, this.config); } @Override public T load(T keyObject, DynamoDBMapperConfig config) { @SuppressWarnings("unchecked") Class clazz = (Class) keyObject.getClass(); config = mergeConfig(config); final DynamoDBMapperTableModel model = getTableModel(clazz, config); String tableName = getTableName(clazz, keyObject, config); GetItemRequest rq = new GetItemRequest() .withRequestMetricCollector(config.getRequestMetricCollector()); Map key = model.mapKey(keyObject); rq.setKey(key); rq.setTableName(tableName); rq.setConsistentRead(config.getConsistentReads() == ConsistentReads.CONSISTENT); GetItemResult item = db.getItem(applyUserAgent(rq)); Map itemAttributes = item.getItem(); if ( itemAttributes == null ) { return null; } T object = privateMarshallIntoObject( toParameters(itemAttributes, clazz, tableName, config)); return object; } @Override public T load(Class clazz, Object hashKey, Object rangeKey, DynamoDBMapperConfig config) { config = mergeConfig(config); final DynamoDBMapperTableModel model = getTableModel(clazz, config); T keyObject = model.newKey(hashKey, rangeKey); return load(keyObject, config); } /** * Returns a map of attribute name to EQ condition for the key prototype * object given. This method considers attributes annotated with either * {@link DynamoDBHashKey} or {@link DynamoDBIndexHashKey}. * * @param obj * The prototype object that includes the hash key value. * @return A map of hash key attribute name to EQ condition for the key * prototype object, or an empty map if obj is null. */ private Map getHashKeyEqualsConditions( DynamoDBMapperTableModel model, T obj) { Map conditions = new HashMap(); if (obj == null) { return conditions; } for (final DynamoDBMapperFieldModel field : model.fields()) { if (field.anyKey(HASH)) { final Object getterReturnResult = field.get(obj); if (getterReturnResult != null) { conditions.put(field.name(), field.eq(getterReturnResult)); } } } return conditions; } /** * Returns the table name for the class given. */ protected final String getTableName(final Class clazz, final DynamoDBMapperConfig config) { return internalGetTableName(clazz, null, config); } /** * Returns the table name for the class or object given. */ protected final String getTableName(final Class clazz, final Object object, final DynamoDBMapperConfig config) { return internalGetTableName(clazz, object, config); } static String internalGetTableName(final Class clazz, final Object object, final DynamoDBMapperConfig config) { // Resolve by object, if possible and desired DynamoDBMapperConfig.ObjectTableNameResolver objectResolver = config.getObjectTableNameResolver(); if (object != null && objectResolver != null) { return objectResolver.getTableName(object, config); } // Resolve by class return config.getTableNameResolver(true).getTableName(clazz, config); } @Override public T marshallIntoObject(Class clazz, Map itemAttributes) { String tableName = getTableName(clazz, config); return privateMarshallIntoObject( toParameters(itemAttributes, clazz, tableName, config)); } /** * The one true implementation of marshallIntoObject. */ private T privateMarshallIntoObject( AttributeTransformer.Parameters parameters) { Class clazz = parameters.getModelClass(); Map values = untransformAttributes(parameters); final DynamoDBMapperTableModel model = getTableModel(clazz, parameters.getMapperConfig()); return model.unconvert(values); } @Override public List marshallIntoObjects(Class clazz, List> itemAttributes) { List result = new ArrayList(itemAttributes.size()); for (Map item : itemAttributes) { result.add(marshallIntoObject(clazz, item)); } return result; } /** * A replacement for {@link #marshallIntoObjects(Class, List)} that takes * an extra set of parameters to be tunneled through to * {@code privateMarshalIntoObject} (if nothing along the way is * overridden). It's package-private because some of the Paginated*List * classes call back into it, but final because no one, even in this * package, should ever override it. */ final List marshallIntoObjects( final List> parameters ) { List result = new ArrayList(parameters.size()); for (AttributeTransformer.Parameters entry : parameters) { result.add(privateMarshallIntoObject(entry)); } return result; } @Override public void save(T object) { save(object, null, config); } @Override public void save(T object, DynamoDBSaveExpression saveExpression) { save(object, saveExpression, config); } @Override public void save(T object, DynamoDBMapperConfig config) { save(object, null, config); } @Override public void save(T object, DynamoDBSaveExpression saveExpression, final DynamoDBMapperConfig config) { final DynamoDBMapperConfig finalConfig = mergeConfig(config); @SuppressWarnings("unchecked") Class clazz = (Class) object.getClass(); String tableName = getTableName(clazz, object, finalConfig); final DynamoDBMapperTableModel model = getTableModel(clazz, finalConfig); /* * We force a putItem request instead of updateItem request either when * CLOBBER is configured, or part of the primary key of the object needs * to be auto-generated. */ boolean forcePut = (finalConfig.getSaveBehavior() == SaveBehavior.CLOBBER) || model.anyKeyGeneratable(object, finalConfig.getSaveBehavior()); SaveObjectHandler saveObjectHandler; if (forcePut) { saveObjectHandler = this.new SaveObjectHandler(clazz, object, tableName, finalConfig, saveExpression) { @Override protected void onPrimaryKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue) { /* Treat key values as common attribute value updates. */ getAttributeValueUpdates().put(attributeName, new AttributeValueUpdate().withValue(keyAttributeValue) .withAction("PUT")); } /* Use default implementation of onNonKeyAttribute(...) */ @Override protected void onNullNonKeyAttribute(String attributeName) { /* When doing a force put, we can safely ignore the null-valued attributes. */ return; } @Override protected void executeLowLevelRequest() { /* Send a putItem request */ doPutItem(); } }; } else { saveObjectHandler = this.new SaveObjectHandler(clazz, object, tableName, finalConfig, saveExpression) { @Override protected void onPrimaryKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue) { /* Put it in the key collection which is later used in the updateItem request. */ getPrimaryKeyAttributeValues().put(attributeName, keyAttributeValue); } @Override protected void onNonKeyAttribute(String attributeName, AttributeValue currentValue) { /* If it's a set attribute and the mapper is configured with APPEND_SET, * we do an "ADD" update instead of the default "PUT". */ if (getLocalSaveBehavior() == SaveBehavior.APPEND_SET) { if (currentValue.getBS() != null || currentValue.getNS() != null || currentValue.getSS() != null) { getAttributeValueUpdates().put( attributeName, new AttributeValueUpdate().withValue( currentValue).withAction("ADD")); return; } } /* Otherwise, we do the default "PUT" update. */ super.onNonKeyAttribute(attributeName, currentValue); } @Override protected void onNullNonKeyAttribute(String attributeName) { /* * If UPDATE_SKIP_NULL_ATTRIBUTES or APPEND_SET is * configured, we don't delete null value attributes. */ if (getLocalSaveBehavior() == SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES || getLocalSaveBehavior() == SaveBehavior.APPEND_SET) { return; } else { /* Delete attributes that are set as null in the object. */ getAttributeValueUpdates() .put(attributeName, new AttributeValueUpdate() .withAction("DELETE")); } } @Override protected void executeLowLevelRequest() { UpdateItemResult updateItemResult = doUpdateItem(); // The UpdateItem request is specified to return ALL_NEW // attributes of the affected item. So if the returned // UpdateItemResult does not include any ReturnedAttributes, // it indicates the UpdateItem failed silently (e.g. the // key-only-put nightmare - // https://forums.aws.amazon.com/thread.jspa?threadID=86798&tstart=25), // in which case we should re-send a PutItem // request instead. if (updateItemResult.getAttributes() == null || updateItemResult.getAttributes().isEmpty()) { // Before we proceed with PutItem, we need to put all // the key attributes (prepared for the // UpdateItemRequest) into the AttributeValueUpdates // collection. for (String keyAttributeName : getPrimaryKeyAttributeValues().keySet()) { getAttributeValueUpdates().put(keyAttributeName, new AttributeValueUpdate() .withValue(getPrimaryKeyAttributeValues().get(keyAttributeName)) .withAction("PUT")); } doPutItem(); } } }; } saveObjectHandler.execute(); } /** * The handler for saving object using DynamoDBMapper. Caller should * implement the abstract methods to provide the expected behavior on each * scenario, and this handler will take care of all the other basic workflow * and common operations. */ protected abstract class SaveObjectHandler { protected final Object object; protected final Class clazz; private final String tableName; private final DynamoDBMapperConfig saveConfig; private final Map primaryKeys; private final Map updateValues; /** * Any expected value conditions specified by the implementation of * DynamoDBMapper, e.g. value assertions on versioned attributes. */ private final Map internalExpectedValueAssertions; /** * Additional expected value conditions specified by the user. */ protected final Map userProvidedExpectedValueConditions; /** * Condition operator on the additional expected value conditions * specified by the user. */ protected final String userProvidedConditionOperator; private final List inMemoryUpdates; /** * Constructs a handler for saving the specified model object. * * @param object The model object to be saved. * @param clazz The domain class of the object. * @param tableName The table name. * @param saveConifg The mapper configuration used for this save. * @param saveExpression The save expression, including the user-provided conditions and an optional logic operator. */ public SaveObjectHandler( Class clazz, Object object, String tableName, DynamoDBMapperConfig saveConfig, DynamoDBSaveExpression saveExpression) { this.clazz = clazz; this.object = object; this.tableName = tableName; this.saveConfig = saveConfig; if (saveExpression != null) { userProvidedExpectedValueConditions = saveExpression .getExpected(); userProvidedConditionOperator = saveExpression .getConditionalOperator(); } else { userProvidedExpectedValueConditions = null; userProvidedConditionOperator = null; } updateValues = new HashMap(); internalExpectedValueAssertions = new HashMap(); inMemoryUpdates = new LinkedList(); primaryKeys = new HashMap(); } /** * The general workflow of a save operation. */ public void execute() { final DynamoDBMapperTableModel model = getTableModel((Class)clazz, saveConfig); for ( final DynamoDBMapperFieldModel field : model.fields() ) { if ( field.canGenerate(object, getLocalSaveBehavior(), model) ) { if ( field.anyKey() ) { onAutoGenerateAssignableKey(field); } else if ( field.versioned() ) { onVersionAttribute(field); } else { onAutoGenerate(field); } } else if ( field.keyType() != null ) { AttributeValue newAttributeValue = field.getAndConvert(object); if ( newAttributeValue == null ) { throw new DynamoDBMappingException(field.id().format("null or empty value for primary key")); } if ( newAttributeValue.getS() == null && newAttributeValue.getN() == null && newAttributeValue.getB() == null) { throw new DynamoDBMappingException(field.id().format( "keys must be scalar values (String, Number, " + "or Binary). Got " + newAttributeValue)); } onPrimaryKeyAttributeValue(field.name(), newAttributeValue); } else { AttributeValue currentValue = field.getAndConvert(object); if ( currentValue != null ) { onNonKeyAttribute(field.name(), currentValue); } else { onNullNonKeyAttribute(field.name()); } } } /* * Execute the implementation of the low level request. */ executeLowLevelRequest(); /* * Finally, after the service call has succeeded, update the * in-memory object with new field values as appropriate. This * currently takes into account of auto-generated keys and versioned * attributes. */ for ( ValueUpdate update : inMemoryUpdates ) { update.apply(); } } /** * Implement this method to do the necessary operations when a primary key * attribute is set with some value. * * @param attributeName * The name of the primary key attribute. * @param keyAttributeValue * The AttributeValue of the primary key attribute as specified in * the object. */ protected abstract void onPrimaryKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue); /** * Implement this method for necessary operations when a non-key * attribute is set a non-null value in the object. * The default implementation simply adds a "PUT" update for the given attribute. * * @param attributeName * The name of the non-key attribute. * @param currentValue * The updated value of the given attribute. */ protected void onNonKeyAttribute(String attributeName, AttributeValue currentValue) { updateValues.put(attributeName, new AttributeValueUpdate() .withValue(currentValue).withAction("PUT")); } /** * Implement this method for necessary operations when a non-key * attribute is set null in the object. * * @param attributeName * The name of the non-key attribute. */ protected abstract void onNullNonKeyAttribute(String attributeName); /** * Implement this method to send the low-level request that is necessary * to complete the save operation. */ protected abstract void executeLowLevelRequest(); /** Get the SaveBehavior used locally for this save operation. **/ protected SaveBehavior getLocalSaveBehavior() { return saveConfig.getSaveBehavior(); } /** Get the table name **/ protected String getTableName() { return tableName; } /** Get the map of all the specified primamry keys of the saved object. **/ protected Map getPrimaryKeyAttributeValues() { return primaryKeys; } /** Get the map of AttributeValueUpdate on each modeled attribute. **/ protected Map getAttributeValueUpdates() { return updateValues; } /** * Merge and return all the expected value conditions (either * user-specified or imposed by the internal implementation of * DynamoDBMapper) for this save operation. */ protected Map mergeExpectedAttributeValueConditions() { return DynamoDBMapper.mergeExpectedAttributeValueConditions( internalExpectedValueAssertions, userProvidedExpectedValueConditions, userProvidedConditionOperator); } /** Get the list of all the necessary in-memory update on the object. **/ protected List getInMemoryUpdates() { return inMemoryUpdates; } /** * Save the item using a UpdateItem request. The handler will call this * method if *
    *
  • CLOBBER configuration is not being used; *
  • AND the item does not contain auto-generated key value; *
*

* The ReturnedValues parameter for the UpdateItem request is set as * ALL_NEW, which means the service should return all of the attributes * of the new version of the item after the update. The handler will use * the returned attributes to detect silent failure on the server-side. */ protected UpdateItemResult doUpdateItem() { UpdateItemRequest req = new UpdateItemRequest() .withTableName(getTableName()) .withKey(getPrimaryKeyAttributeValues()) .withAttributeUpdates( transformAttributeUpdates( this.clazz, getTableName(), getPrimaryKeyAttributeValues(), getAttributeValueUpdates(), saveConfig)) .withExpected(mergeExpectedAttributeValueConditions()) .withConditionalOperator(userProvidedConditionOperator) .withReturnValues(ReturnValue.ALL_NEW) .withRequestMetricCollector(saveConfig.getRequestMetricCollector()); return db.updateItem(applyUserAgent(req)); } /** * Save the item using a PutItem request. The handler will call this * method if *

    *
  • CLOBBER configuration is being used; *
  • OR the item contains auto-generated key value; *
  • OR an UpdateItem request has silently failed (200 response with * no affected attribute), which indicates the key-only-put scenario * that we used to handle by the keyOnlyPut(...) hack. *
*/ protected PutItemResult doPutItem() { Map attributeValues = convertToItem(getAttributeValueUpdates()); attributeValues = transformAttributes( toParameters(attributeValues, this.clazz, getTableName(), saveConfig)); PutItemRequest req = new PutItemRequest() .withTableName(getTableName()) .withItem(attributeValues) .withExpected(mergeExpectedAttributeValueConditions()) .withConditionalOperator(userProvidedConditionOperator) .withRequestMetricCollector(saveConfig.getRequestMetricCollector()); return db.putItem(applyUserAgent(req)); } /** * Auto-generates the attribute value. * @param mapping The mapping details. */ private void onAutoGenerate(DynamoDBMapperFieldModel field) { AttributeValue value = field.generateAndConvert(object); updateValues.put(field.name(), new AttributeValueUpdate().withAction("PUT").withValue(value)); inMemoryUpdates.add(new ValueUpdate(field, value, object)); } /** * Auto-generates the key. * @param mapping The mapping details. */ private void onAutoGenerateAssignableKey(DynamoDBMapperFieldModel field) { // Generate the new key value first, then ensure it doesn't exist. onAutoGenerate(field); if ( getLocalSaveBehavior() != SaveBehavior.CLOBBER && !internalExpectedValueAssertions.containsKey(field.name())) { // Add an expect clause to make sure that the item // doesn't already exist, since it's supposed to be new internalExpectedValueAssertions.put(field.name(), field.expectedNotExists()); } } /** * Auto-generates the version. * @param mapping The mapping details. */ private void onVersionAttribute(DynamoDBMapperFieldModel field) { if ( getLocalSaveBehavior() != SaveBehavior.CLOBBER && !internalExpectedValueAssertions.containsKey(field.name())) { // First establish the expected (current) value for the // update call // For new objects, insist that the value doesn't exist. // For existing ones, insist it has the old value. internalExpectedValueAssertions.put(field.name(), field.expectedIfExists(field.get(object))); } // Generate the new version value onAutoGenerate(field); } } @Override public void delete(Object object) { delete(object, null, this.config); } @Override public void delete(Object object, DynamoDBDeleteExpression deleteExpression) { delete(object, deleteExpression, this.config); } @Override public void delete(Object object, DynamoDBMapperConfig config) { delete(object, null, config); } @Override public void delete(T object, DynamoDBDeleteExpression deleteExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); @SuppressWarnings("unchecked") Class clazz = (Class) object.getClass(); final DynamoDBMapperTableModel model = getTableModel(clazz, config); String tableName = getTableName(clazz, object, config); Map key = model.mapKey(object); /* * If there is a version field, make sure we assert its value. If the * version field is null (only should happen in unusual circumstances), * pretend it doesn't have a version field after all. */ Map internalAssertions = new HashMap(); if ( config.getSaveBehavior() != SaveBehavior.CLOBBER ) { for ( final DynamoDBMapperFieldModel field : model.fields() ) { if ( field.versioned() ) { internalAssertions.put(field.name(), field.expectedIfExists(field.get(object))); break; } } } DeleteItemRequest req = new DeleteItemRequest().withKey(key) .withTableName(tableName).withExpected(internalAssertions) .withRequestMetricCollector(config.getRequestMetricCollector()); if (deleteExpression != null) { String conditionalExpression = deleteExpression.getConditionExpression(); if (conditionalExpression != null) { if (internalAssertions != null && !internalAssertions.isEmpty()) { throw new AmazonClientException( "Condition Expressions cannot be used if a versioned attribute is present"); } req = req .withConditionExpression(conditionalExpression) .withExpressionAttributeNames( deleteExpression.getExpressionAttributeNames()) .withExpressionAttributeValues( deleteExpression.getExpressionAttributeValues()); } req = req.withExpected( mergeExpectedAttributeValueConditions(internalAssertions, deleteExpression.getExpected(), deleteExpression.getConditionalOperator())) .withConditionalOperator( deleteExpression.getConditionalOperator()); } db.deleteItem(applyUserAgent(req)); } @Override public List batchDelete(Iterable objectsToDelete) { return batchWrite(Collections.emptyList(), objectsToDelete, this.config); } @Override public List batchDelete(Object... objectsToDelete) { return batchWrite(Collections.emptyList(), Arrays.asList(objectsToDelete), this.config); } @Override public List batchSave(Iterable objectsToSave) { return batchWrite(objectsToSave, Collections.emptyList(), this.config); } @Override public List batchSave(Object... objectsToSave) { return batchWrite(Arrays.asList(objectsToSave), Collections.emptyList(), this.config); } @Override public List batchWrite(Iterable objectsToWrite, Iterable objectsToDelete) { return batchWrite(objectsToWrite, objectsToDelete, this.config); } @Override public List batchWrite(Iterable objectsToWrite, Iterable objectsToDelete, DynamoDBMapperConfig config) { config = mergeConfig(config); List totalFailedBatches = new LinkedList(); HashMap> requestItems = new HashMap>(); List inMemoryUpdates = new LinkedList(); for ( Object toWrite : objectsToWrite ) { Class clazz = (Class)toWrite.getClass(); String tableName = getTableName(clazz, toWrite, config); Map attributeValues = new HashMap(); // Look at every getter and construct a value object for it final DynamoDBMapperTableModel model = getTableModel(clazz, config); for ( final DynamoDBMapperFieldModel field : model.fields() ) { AttributeValue currentValue = null; if ( field.canGenerate(toWrite, config.getSaveBehavior(), model) && !field.versioned() ) { currentValue = field.generateAndConvert(toWrite); inMemoryUpdates.add(new ValueUpdate(field, currentValue, toWrite)); } else { currentValue = field.getAndConvert(toWrite); } if ( currentValue != null ) { attributeValues.put(field.name(), currentValue); } } if ( !requestItems.containsKey(tableName) ) { requestItems.put(tableName, new LinkedList()); } AttributeTransformer.Parameters parameters = toParameters(attributeValues, clazz, tableName, config); requestItems.get(tableName).add( new WriteRequest().withPutRequest( new PutRequest().withItem( transformAttributes(parameters)))); } for ( Object toDelete : objectsToDelete ) { Class clazz = (Class)toDelete.getClass(); String tableName = getTableName(clazz, toDelete, config); final DynamoDBMapperTableModel model = getTableModel(clazz, config); Map key = model.mapKey(toDelete); if ( !requestItems.containsKey(tableName) ) { requestItems.put(tableName, new LinkedList()); } requestItems.get(tableName).add( new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(key))); } // Break into chunks of 25 items and make service requests to DynamoDB while ( !requestItems.isEmpty() ) { HashMap> batch = new HashMap>(); int i = 0; Iterator>> tableIter = requestItems.entrySet().iterator(); while ( tableIter.hasNext() && i < MAX_ITEMS_PER_BATCH ) { Entry> tableRequest = tableIter.next(); batch.put(tableRequest.getKey(), new LinkedList()); Iterator writeRequestIter = tableRequest.getValue().iterator(); while ( writeRequestIter.hasNext() && i++ < MAX_ITEMS_PER_BATCH ) { WriteRequest writeRequest = writeRequestIter.next(); batch.get(tableRequest.getKey()).add(writeRequest); writeRequestIter.remove(); } // If we've processed all the write requests for this table, // remove it from the parent iterator. if ( !writeRequestIter.hasNext() ) { tableIter.remove(); } } List failedBatches = writeOneBatch(batch, config.getBatchWriteRetryStrategy()); if (failedBatches != null) { totalFailedBatches.addAll(failedBatches); // If contains throttling exception, we do a backoff if (containsThrottlingException(failedBatches)) { try { Thread.sleep(1000 * 2); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } } } // Once the entire batch is processed, update assigned keys in memory for ( ValueUpdate update : inMemoryUpdates ) { update.apply(); } return totalFailedBatches; } /** * Process one batch of requests(max 25). It will divide the batch if * receives request too large exception(the total size of the request is beyond 1M). */ private List writeOneBatch( Map> batch, BatchWriteRetryStrategy batchWriteRetryStrategy) { List failedBatches = new LinkedList(); Map> firstHalfBatch = new HashMap>(); Map> secondHalfBatch = new HashMap>(); FailedBatch failedBatch = doBatchWriteItemWithRetry(batch, batchWriteRetryStrategy); if (failedBatch != null) { // If the exception is request entity too large, we divide the batch // into smaller parts. if (failedBatch.getException() instanceof AmazonServiceException && RetryUtils.isRequestEntityTooLargeException((AmazonServiceException) failedBatch.getException())) { // If only one item left, the item size must beyond 64k, which // exceedes the limit. if (computeFailedBatchSize(failedBatch) == 1) { failedBatches.add(failedBatch); } else { divideBatch(batch, firstHalfBatch, secondHalfBatch); failedBatches.addAll(writeOneBatch(firstHalfBatch, batchWriteRetryStrategy)); failedBatches.addAll(writeOneBatch(secondHalfBatch, batchWriteRetryStrategy)); } } else { failedBatches.add(failedBatch); } } return failedBatches; } /** * Check whether there are throttling exception in the failed batches. */ private boolean containsThrottlingException (List failedBatches) { for (FailedBatch failedBatch : failedBatches) { Exception e = failedBatch.getException(); if (e instanceof AmazonServiceException && RetryUtils.isThrottlingException((AmazonServiceException) e)) { return true; } } return false; } /** * Divide the batch of objects to save into two smaller batches. Each contains half of the elements. */ private void divideBatch(Map> batch, Map> firstHalfBatch, Map> secondHalfBatch) { for (String key : batch.keySet()) { List requests = batch.get(key); List firstHalfRequests = requests.subList(0, requests.size() / 2); List secondHalfRequests = requests.subList(requests.size() / 2, requests.size()); firstHalfBatch.put(key, firstHalfRequests); secondHalfBatch.put(key, secondHalfRequests); } } /** * Count the total number of unprocessed items in the failed batch. */ private int computeFailedBatchSize(FailedBatch failedBatch) { int count = 0; for (String tableName : failedBatch.getUnprocessedItems().keySet()) { count += failedBatch.getUnprocessedItems().get(tableName).size(); } return count; } /** * Continue trying to process the batch and retry on UnproccessedItems as * according to the specified BatchWriteRetryStrategy */ private FailedBatch doBatchWriteItemWithRetry( Map> batch, BatchWriteRetryStrategy batchWriteRetryStrategy) { BatchWriteItemResult result = null; int retries = 0; int maxRetries = batchWriteRetryStrategy .getMaxRetryOnUnprocessedItems(Collections .unmodifiableMap(batch)); FailedBatch failedBatch = null; Map> pendingItems = batch; while (true) { try { result = db.batchWriteItem(applyBatchOperationUserAgent( new BatchWriteItemRequest().withRequestItems(pendingItems))); } catch (Exception e) { failedBatch = new FailedBatch(); failedBatch.setUnprocessedItems(pendingItems); failedBatch.setException(e); return failedBatch; } pendingItems = result.getUnprocessedItems(); if (pendingItems.size() > 0) { // return pendingItems as a FailedBatch if we have exceeded max retry if (maxRetries >= 0 && retries >= maxRetries) { failedBatch = new FailedBatch(); failedBatch.setUnprocessedItems(pendingItems); failedBatch.setException(null); return failedBatch; } pause(batchWriteRetryStrategy.getDelayBeforeRetryUnprocessedItems( Collections.unmodifiableMap(pendingItems), retries)); retries++; } else { break; } } return failedBatch; } @Override public Map> batchLoad(Iterable itemsToGet) { return batchLoad(itemsToGet, this.config); } @Override public Map> batchLoad(Iterable itemsToGet, DynamoDBMapperConfig config) { config = mergeConfig(config); boolean consistentReads = (config.getConsistentReads() == ConsistentReads.CONSISTENT); if (itemsToGet == null) { return new HashMap>(); } Map requestItems = new HashMap(); Map> classesByTableName = new HashMap>(); Map> resultSet = new HashMap>(); int count = 0; for ( Object keyObject : itemsToGet ) { Class clazz = (Class)keyObject.getClass(); final DynamoDBMapperTableModel model = getTableModel(clazz, config); String tableName = getTableName(clazz, keyObject, config); classesByTableName.put(tableName, clazz); if ( !requestItems.containsKey(tableName) ) { requestItems.put( tableName, new KeysAndAttributes().withConsistentRead(consistentReads).withKeys( new LinkedList>())); } requestItems.get(tableName).getKeys().add(model.mapKey(keyObject)); // Reach the maximum number which can be handled in a single batchGet if ( ++count == 100 ) { processBatchGetRequest(classesByTableName, requestItems, resultSet, config); requestItems.clear(); count = 0; } } if ( count > 0 ) { processBatchGetRequest(classesByTableName, requestItems, resultSet, config); } return resultSet; } @Override public Map> batchLoad(Map, List> itemsToGet) { return batchLoad(itemsToGet, this.config); } @Override public Map> batchLoad(Map, List> itemsToGet, DynamoDBMapperConfig config) { List keys = new ArrayList(); if ( itemsToGet != null ) { for ( Class clazz : itemsToGet.keySet() ) { if ( itemsToGet.get(clazz) != null ) { final DynamoDBMapperTableModel model = getTableModel(clazz, config); for ( KeyPair keyPair : itemsToGet.get(clazz) ) { keys.add(model.newKey(keyPair.getHashKey(), keyPair.getRangeKey())); } } } } return batchLoad(keys, config); } /** * @param config never null */ private void processBatchGetRequest( final Map> classesByTableName, final Map requestItems, final Map> resultSet, final DynamoDBMapperConfig config) { BatchGetItemResult batchGetItemResult = null; BatchGetItemRequest batchGetItemRequest = new BatchGetItemRequest() .withRequestMetricCollector(config.getRequestMetricCollector()); batchGetItemRequest.setRequestItems(requestItems); BatchLoadRetryStrategy batchLoadStrategy = config.getBatchLoadRetryStrategy(); BatchLoadContext batchLoadContext = new BatchLoadContext(batchGetItemRequest); int retries = 0; int noOfItemsInOriginalRequest = requestItems.size(); do { if ( batchGetItemResult != null ) { retries++; batchLoadContext.setRetriesAttempted(retries); if (batchGetItemResult.getUnprocessedKeys().size() > 0){ pause(batchLoadStrategy.getDelayBeforeNextRetry(batchLoadContext)); batchGetItemRequest.setRequestItems( batchGetItemResult.getUnprocessedKeys()); } } batchGetItemResult = db.batchGetItem( applyBatchOperationUserAgent(batchGetItemRequest)); Map>> responses = batchGetItemResult.getResponses(); for ( String tableName : responses.keySet() ) { List objects = null; if ( resultSet.get(tableName) != null ) { objects = resultSet.get(tableName); } else { objects = new LinkedList(); } Class clazz = classesByTableName.get(tableName); for ( Map item : responses.get(tableName) ) { AttributeTransformer.Parameters parameters = toParameters(item, clazz, tableName, config); objects.add(privateMarshallIntoObject(parameters)); } resultSet.put(tableName, objects); } batchLoadContext.setBatchGetItemResult(batchGetItemResult); // the number of unprocessed keys and Batch Load Strategy will drive the number of retries } while ( batchLoadStrategy.shouldRetry(batchLoadContext) ); //We still need to throw Amazon Client Exception when none of the requested keys are processed if(noOfItemsInOriginalRequest == batchGetItemResult.getUnprocessedKeys().size()) { throw new AmazonClientException("Batch Get Item request to server hasn't received any data. Please try again later"); } } private final class ValueUpdate { private final DynamoDBMapperFieldModel field; private final AttributeValue newValue; private final Object target; public ValueUpdate( DynamoDBMapperFieldModel field, AttributeValue newValue, Object target) { this.field = field; this.newValue = newValue; this.target = target; } public void apply() { field.unconvertAndSet(target, newValue); } } /** * Converts the {@link AttributeValueUpdate} map given to an equivalent * {@link AttributeValue} map. */ private Map convertToItem(Map putValues) { Map map = new HashMap(); for ( Entry entry : putValues.entrySet() ) { String attributeName = entry.getKey(); AttributeValue attributeValue = entry.getValue().getValue(); String attributeAction = entry.getValue().getAction(); /* * AttributeValueUpdate allows nulls for its values, since they are * semantically meaningful. AttributeValues never have null values. */ if ( attributeValue != null && !AttributeAction.DELETE.toString().equals(attributeAction)) { map.put(attributeName, attributeValue); } } return map; } @Override public PaginatedScanList scan(Class clazz, DynamoDBScanExpression scanExpression) { return scan(clazz, scanExpression, config); } @Override public PaginatedScanList scan(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); ScanResult scanResult = db.scan(applyUserAgent(scanRequest)); return new PaginatedScanList(this, clazz, db, scanRequest, scanResult, config.getPaginationLoadingStrategy(), config); } @Override public PaginatedParallelScanList parallelScan(Class clazz, DynamoDBScanExpression scanExpression, int totalSegments) { return parallelScan(clazz, scanExpression, totalSegments, config); } @Override public PaginatedParallelScanList parallelScan(Class clazz, DynamoDBScanExpression scanExpression, int totalSegments, DynamoDBMapperConfig config) { config = mergeConfig(config); // Create hard copies of the original scan request with difference segment number. List parallelScanRequests = createParallelScanRequestsFromExpression(clazz, scanExpression, totalSegments, config); ParallelScanTask parallelScanTask = new ParallelScanTask(db, parallelScanRequests); return new PaginatedParallelScanList(this, clazz, db, parallelScanTask, config.getPaginationLoadingStrategy(), config); } @Override public ScanResultPage scanPage(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); ScanResult scanResult = db.scan(applyUserAgent(scanRequest)); ScanResultPage result = new ScanResultPage(); List> parameters = toParameters(scanResult.getItems(), clazz, scanRequest.getTableName(), config); result.setResults(marshallIntoObjects(parameters)); result.setLastEvaluatedKey(scanResult.getLastEvaluatedKey()); result.setCount(scanResult.getCount()); result.setScannedCount(scanResult.getScannedCount()); result.setConsumedCapacity(scanResult.getConsumedCapacity()); return result; } @Override public ScanResultPage scanPage(Class clazz, DynamoDBScanExpression scanExpression) { return scanPage(clazz, scanExpression, this.config); } @Override public PaginatedQueryList query(Class clazz, DynamoDBQueryExpression queryExpression) { return query(clazz, queryExpression, config); } @Override public PaginatedQueryList query(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); QueryResult queryResult = db.query(applyUserAgent(queryRequest)); return new PaginatedQueryList(this, clazz, db, queryRequest, queryResult, config.getPaginationLoadingStrategy(), config); } @Override public QueryResultPage queryPage(Class clazz, DynamoDBQueryExpression queryExpression) { return queryPage(clazz, queryExpression, this.config); } @Override public QueryResultPage queryPage(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); QueryResult queryResult = db.query(applyUserAgent(queryRequest)); QueryResultPage result = new QueryResultPage(); List> parameters = toParameters(queryResult.getItems(), clazz, queryRequest.getTableName(), config); result.setResults(marshallIntoObjects(parameters)); result.setLastEvaluatedKey(queryResult.getLastEvaluatedKey()); result.setCount(queryResult.getCount()); result.setScannedCount(queryResult.getScannedCount()); result.setConsumedCapacity(queryResult.getConsumedCapacity()); return result; } @Override public int count(Class clazz, DynamoDBScanExpression scanExpression) { return count(clazz, scanExpression, config); } @Override public int count(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); scanRequest.setSelect(Select.COUNT); // Count scans can also be truncated for large datasets int count = 0; ScanResult scanResult = null; do { scanResult = db.scan(applyUserAgent(scanRequest)); count += scanResult.getCount(); scanRequest.setExclusiveStartKey(scanResult.getLastEvaluatedKey()); } while (scanResult.getLastEvaluatedKey() != null); return count; } @Override public int count(Class clazz, DynamoDBQueryExpression queryExpression) { return count(clazz, queryExpression, config); } @Override public int count(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); queryRequest.setSelect(Select.COUNT); // Count queries can also be truncated for large datasets int count = 0; QueryResult queryResult = null; do { queryResult = db.query(applyUserAgent(queryRequest)); count += queryResult.getCount(); queryRequest.setExclusiveStartKey(queryResult.getLastEvaluatedKey()); } while (queryResult.getLastEvaluatedKey() != null); return count; } /** * Merges the config object given with the one specified at construction and * returns the result. */ private DynamoDBMapperConfig mergeConfig(DynamoDBMapperConfig config) { if ( config != this.config ) config = new DynamoDBMapperConfig(this.config, config); return config; } /** * @param config never null */ private ScanRequest createScanRequestFromExpression(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { ScanRequest scanRequest = new ScanRequest(); scanRequest.setTableName(getTableName(clazz, config)); scanRequest.setIndexName(scanExpression.getIndexName()); scanRequest.setScanFilter(scanExpression.getScanFilter()); scanRequest.setLimit(scanExpression.getLimit()); scanRequest.setExclusiveStartKey(scanExpression.getExclusiveStartKey()); scanRequest.setTotalSegments(scanExpression.getTotalSegments()); scanRequest.setSegment(scanExpression.getSegment()); scanRequest.setConditionalOperator(scanExpression.getConditionalOperator()); scanRequest.setFilterExpression(scanExpression.getFilterExpression()); scanRequest.setExpressionAttributeNames(scanExpression .getExpressionAttributeNames()); scanRequest.setExpressionAttributeValues(scanExpression .getExpressionAttributeValues()); scanRequest.setRequestMetricCollector(config.getRequestMetricCollector()); scanRequest.setSelect(scanExpression.getSelect()); scanRequest.setProjectionExpression(scanExpression.getProjectionExpression()); scanRequest.setReturnConsumedCapacity(scanExpression.getReturnConsumedCapacity()); scanRequest.setConsistentRead(scanExpression.isConsistentRead()); return applyUserAgent(scanRequest); } /** * @param config never null */ private List createParallelScanRequestsFromExpression(Class clazz, DynamoDBScanExpression scanExpression, int totalSegments, DynamoDBMapperConfig config) { if (totalSegments < 1) { throw new IllegalArgumentException("Parallel scan should have at least one scan segment."); } if (scanExpression.getExclusiveStartKey() != null) { log.info("The ExclusiveStartKey parameter specified in the DynamoDBScanExpression is ignored," + " since the individual parallel scan request on each segment is applied on a separate key scope."); } if (scanExpression.getSegment() != null || scanExpression.getTotalSegments() != null) { log.info("The Segment and TotalSegments parameters specified in the DynamoDBScanExpression are ignored."); } List parallelScanRequests= new LinkedList(); for (int segment = 0; segment < totalSegments; segment++) { ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); parallelScanRequests.add(scanRequest .withSegment(segment).withTotalSegments(totalSegments) .withExclusiveStartKey(null)); } return parallelScanRequests; } private QueryRequest createQueryRequestFromExpression(Class clazz, DynamoDBQueryExpression xpress, DynamoDBMapperConfig config) { final DynamoDBMapperTableModel model = getTableModel(clazz, config); QueryRequest req = new QueryRequest(); req.setConsistentRead(xpress.isConsistentRead()); req.setTableName(getTableName(clazz, xpress.getHashKeyValues(), config)); req.setIndexName(xpress.getIndexName()); // Hash key (primary or index) conditions Map hashKeyConditions = getHashKeyEqualsConditions( model, xpress.getHashKeyValues()); // Range key (primary or index) conditions Map rangeKeyConditions = xpress.getRangeKeyConditions(); req.setKeyConditionExpression(xpress.getKeyConditionExpression()); processKeyConditions(clazz, req, hashKeyConditions, rangeKeyConditions, model); req.withScanIndexForward(xpress.isScanIndexForward()) .withLimit(xpress.getLimit()) .withExclusiveStartKey(xpress.getExclusiveStartKey()) .withQueryFilter(xpress.getQueryFilter()) .withConditionalOperator(xpress.getConditionalOperator()) .withSelect(xpress.getSelect()) .withProjectionExpression(xpress.getProjectionExpression()) .withFilterExpression(xpress.getFilterExpression()) .withExpressionAttributeNames(xpress.getExpressionAttributeNames()) .withExpressionAttributeValues(xpress.getExpressionAttributeValues()) .withReturnConsumedCapacity(xpress.getReturnConsumedCapacity()) .withRequestMetricCollector(config.getRequestMetricCollector()) ; return applyUserAgent(req); } /** * Utility method for checking the validity of both hash and range key * conditions. It also tries to infer the correct index name from the POJO * annotation, if such information is not directly specified by the user. * * @param clazz * The domain class of the queried items. * @param queryRequest * The QueryRequest object to be sent to service. * @param hashKeyConditions * All the hash key EQ conditions extracted from the POJO object. * The mapper will choose one of them that could be applied together with * the user-specified (if any) index name and range key conditions. Or it * throws error if more than one conditions are applicable for the query. * @param rangeKeyConditions * The range conditions specified by the user. We currently only * allow at most one range key condition. */ private void processKeyConditions(Class clazz, QueryRequest queryRequest, Map hashKeyConditions, Map rangeKeyConditions, DynamoDBMapperTableModel model) { // There should be least one hash key condition. final String keyCondExpression = queryRequest.getKeyConditionExpression(); if (keyCondExpression == null) { if (hashKeyConditions == null || hashKeyConditions.isEmpty()) { throw new IllegalArgumentException( "Illegal query expression: No hash key condition is found in the query"); } } else { if (hashKeyConditions != null && !hashKeyConditions.isEmpty()) { throw new IllegalArgumentException( "Illegal query expression: Either the hash key conditions or the key condition expression must be specified but not both."); } if (rangeKeyConditions != null && !rangeKeyConditions.isEmpty()) { throw new IllegalArgumentException( "Illegal query expression: The range key conditions can only be specified when the key condition expression is not specified."); } // key condition expression is in use return; } // We don't allow multiple range key conditions. if (rangeKeyConditions != null && rangeKeyConditions.size() > 1) { throw new IllegalArgumentException( "Illegal query expression: Conditions on multiple range keys (" + rangeKeyConditions.keySet().toString() + ") are found in the query. DynamoDB service only accepts up to ONE range key condition."); } final boolean hasRangeKeyCondition = (rangeKeyConditions != null) && (!rangeKeyConditions.isEmpty()); final String userProvidedIndexName = queryRequest.getIndexName(); final String primaryHashKeyName = model.hashKey().name(); // First collect the names of all the global/local secondary indexes that could be applied to this query. // If the user explicitly specified an index name, we also need to // 1) check the index is applicable for both hash and range key conditions // 2) choose one hash key condition if there are more than one of them boolean hasPrimaryHashKeyCondition = false; final Map> annotatedGSIsOnHashKeys = new HashMap>(); String hashKeyNameForThisQuery = null; boolean hasPrimaryRangeKeyCondition = false; final Set annotatedLSIsOnRangeKey = new HashSet(); final Set annotatedGSIsOnRangeKey = new HashSet(); // Range key condition String rangeKeyNameForThisQuery = null; if (hasRangeKeyCondition) { for (String rangeKeyName : rangeKeyConditions.keySet()) { rangeKeyNameForThisQuery = rangeKeyName; final DynamoDBMapperFieldModel rk = model.field(rangeKeyName); if (rk.keyType() == RANGE) { hasPrimaryRangeKeyCondition = true; } annotatedLSIsOnRangeKey.addAll(rk.localSecondaryIndexNames()); annotatedGSIsOnRangeKey.addAll(rk.globalSecondaryIndexNames(RANGE)); } if ( !hasPrimaryRangeKeyCondition && annotatedLSIsOnRangeKey.isEmpty() && annotatedGSIsOnRangeKey.isEmpty()) { throw new DynamoDBMappingException( "The query contains a condition on a range key (" + rangeKeyNameForThisQuery + ") " + "that is not annotated with either @DynamoDBRangeKey or @DynamoDBIndexRangeKey."); } } final boolean userProvidedLSIWithRangeKeyCondition = (userProvidedIndexName != null) && (annotatedLSIsOnRangeKey.contains(userProvidedIndexName)); final boolean hashOnlyLSIQuery = (userProvidedIndexName != null) && ( !hasRangeKeyCondition ) && model.localSecondaryIndex(userProvidedIndexName) != null; final boolean userProvidedLSI = userProvidedLSIWithRangeKeyCondition || hashOnlyLSIQuery; final boolean userProvidedGSIWithRangeKeyCondition = (userProvidedIndexName != null) && (annotatedGSIsOnRangeKey.contains(userProvidedIndexName)); final boolean hashOnlyGSIQuery = (userProvidedIndexName != null) && ( !hasRangeKeyCondition ) && model.globalSecondaryIndex(userProvidedIndexName) != null; final boolean userProvidedGSI = userProvidedGSIWithRangeKeyCondition || hashOnlyGSIQuery; if (userProvidedLSI && userProvidedGSI ) { throw new DynamoDBMappingException( "Invalid query: " + "Index \"" + userProvidedIndexName + "\" " + "is annotateded as both a LSI and a GSI for attribute."); } // Hash key conditions for (String hashKeyName : hashKeyConditions.keySet()) { if (hashKeyName.equals(primaryHashKeyName)) { hasPrimaryHashKeyCondition = true; } final DynamoDBMapperFieldModel hk = model.field(hashKeyName); Collection annotatedGSINames = hk.globalSecondaryIndexNames(HASH); annotatedGSIsOnHashKeys.put(hashKeyName, annotatedGSINames == null ? new HashSet() : new HashSet(annotatedGSINames)); // Additional validation if the user provided an index name. if (userProvidedIndexName != null) { boolean foundHashKeyConditionValidWithUserProvidedIndex = false; if (userProvidedLSI && hashKeyName.equals(primaryHashKeyName)) { // found an applicable hash key condition (primary hash + LSI range) foundHashKeyConditionValidWithUserProvidedIndex = true; } else if (userProvidedGSI && annotatedGSINames != null && annotatedGSINames.contains(userProvidedIndexName)) { // found an applicable hash key condition (GSI hash + range) foundHashKeyConditionValidWithUserProvidedIndex = true; } if (foundHashKeyConditionValidWithUserProvidedIndex) { if ( hashKeyNameForThisQuery != null ) { throw new IllegalArgumentException( "Ambiguous query expression: More than one hash key EQ conditions (" + hashKeyNameForThisQuery + ", " + hashKeyName + ") are applicable to the specified index (" + userProvidedIndexName + "). " + "Please provide only one of them in the query expression."); } else { // found an applicable hash key condition hashKeyNameForThisQuery = hashKeyName; } } } } // Collate all the key conditions Map keyConditions = new HashMap(); // With user-provided index name if (userProvidedIndexName != null) { if (hasRangeKeyCondition && ( !userProvidedLSI ) && ( !userProvidedGSI )) { throw new IllegalArgumentException( "Illegal query expression: No range key condition is applicable to the specified index (" + userProvidedIndexName + "). "); } if (hashKeyNameForThisQuery == null) { throw new IllegalArgumentException( "Illegal query expression: No hash key condition is applicable to the specified index (" + userProvidedIndexName + "). "); } keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery)); if (hasRangeKeyCondition) { keyConditions.putAll(rangeKeyConditions); } } // Infer the index name by finding the index shared by both hash and range key annotations. else { if (hasRangeKeyCondition) { String inferredIndexName = null; hashKeyNameForThisQuery = null; if (hasPrimaryHashKeyCondition && hasPrimaryRangeKeyCondition) { // Found valid query: primary hash + range key conditions hashKeyNameForThisQuery = primaryHashKeyName; } else { // Intersect the set of all the indexes applicable to the range key // with the set of indexes applicable to each hash key condition. for (String hashKeyName : annotatedGSIsOnHashKeys.keySet()) { boolean foundValidQueryExpressionWithInferredIndex = false; String indexNameInferredByThisHashKey = null; if (hashKeyName.equals(primaryHashKeyName)) { if (annotatedLSIsOnRangeKey.size() == 1) { // Found valid query (Primary hash + LSI range conditions) foundValidQueryExpressionWithInferredIndex = true; indexNameInferredByThisHashKey = annotatedLSIsOnRangeKey.iterator().next(); } } Set annotatedGSIsOnHashKey = annotatedGSIsOnHashKeys.get(hashKeyName); // We don't need the data in annotatedGSIsOnHashKeys afterwards, // so it's safe to do the intersection in-place. annotatedGSIsOnHashKey.retainAll(annotatedGSIsOnRangeKey); if (annotatedGSIsOnHashKey.size() == 1) { // Found valid query (Hash + range conditions on a GSI) if (foundValidQueryExpressionWithInferredIndex) { hashKeyNameForThisQuery = hashKeyName; inferredIndexName = indexNameInferredByThisHashKey; } foundValidQueryExpressionWithInferredIndex = true; indexNameInferredByThisHashKey = annotatedGSIsOnHashKey.iterator().next(); } if (foundValidQueryExpressionWithInferredIndex) { if (hashKeyNameForThisQuery != null) { throw new IllegalArgumentException( "Ambiguous query expression: Found multiple valid queries: " + "(Hash: \"" + hashKeyNameForThisQuery + "\", Range: \"" + rangeKeyNameForThisQuery + "\", Index: \"" + inferredIndexName + "\") and " + "(Hash: \"" + hashKeyName + "\", Range: \"" + rangeKeyNameForThisQuery + "\", Index: \"" + indexNameInferredByThisHashKey + "\")."); } else { hashKeyNameForThisQuery = hashKeyName; inferredIndexName = indexNameInferredByThisHashKey; } } } } if (hashKeyNameForThisQuery != null) { keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery)); keyConditions.putAll(rangeKeyConditions); queryRequest.setIndexName(inferredIndexName); } else { throw new IllegalArgumentException( "Illegal query expression: Cannot infer the index name from the query expression."); } } else { // No range key condition is specified. if (hashKeyConditions.size() > 1) { if ( hasPrimaryHashKeyCondition ) { keyConditions.put(primaryHashKeyName, hashKeyConditions.get(primaryHashKeyName)); } else { throw new IllegalArgumentException( "Ambiguous query expression: More than one index hash key EQ conditions (" + hashKeyConditions.keySet() + ") are applicable to the query. " + "Please provide only one of them in the query expression, or specify the appropriate index name."); } } else { // Only one hash key condition String hashKeyName = annotatedGSIsOnHashKeys.keySet().iterator().next(); if ( !hasPrimaryHashKeyCondition ) { if (annotatedGSIsOnHashKeys.get(hashKeyName).size() == 1) { // Set the index if the index hash key is only annotated with one GSI. queryRequest.setIndexName(annotatedGSIsOnHashKeys.get(hashKeyName).iterator().next()); } else if (annotatedGSIsOnHashKeys.get(hashKeyName).size() > 1) { throw new IllegalArgumentException( "Ambiguous query expression: More than one GSIs (" + annotatedGSIsOnHashKeys.get(hashKeyName) + ") are applicable to the query. " + "Please specify one of them in your query expression."); } else { throw new IllegalArgumentException( "Illegal query expression: No GSI is found in the @DynamoDBIndexHashKey annotation for attribute " + "\"" + hashKeyName + "\"."); } } keyConditions.putAll(hashKeyConditions); } } } queryRequest.setKeyConditions(keyConditions); } private AttributeTransformer.Parameters toParameters( final Map attributeValues, final Class modelClass, final String tableName, final DynamoDBMapperConfig mapperConfig) { return toParameters(attributeValues, false, modelClass, tableName, mapperConfig); } private AttributeTransformer.Parameters toParameters( final Map attributeValues, final boolean partialUpdate, final Class modelClass, final String tableName, final DynamoDBMapperConfig mapperConfig) { return new TransformerParameters( getTableModel(modelClass, mapperConfig), attributeValues, partialUpdate, modelClass, mapperConfig, tableName); } final List> toParameters( final List> attributeValues, final Class modelClass, final String tableName, final DynamoDBMapperConfig mapperConfig ) { List> rval = new ArrayList>( attributeValues.size()); for (Map item : attributeValues) { rval.add(toParameters(item, modelClass, tableName, mapperConfig)); } return rval; } /** * The one true implementation of AttributeTransformer.Parameters. */ private static class TransformerParameters implements AttributeTransformer.Parameters { private final DynamoDBMapperTableModel model; private final Map attributeValues; private final boolean partialUpdate; private final Class modelClass; private final DynamoDBMapperConfig mapperConfig; private final String tableName; private String hashKeyName; private String rangeKeyName; public TransformerParameters( final DynamoDBMapperTableModel model, final Map attributeValues, final boolean partialUpdate, final Class modelClass, final DynamoDBMapperConfig mapperConfig, final String tableName) { this.model = model; this.attributeValues = Collections.unmodifiableMap(attributeValues); this.partialUpdate = partialUpdate; this.modelClass = modelClass; this.mapperConfig = mapperConfig; this.tableName = tableName; } @Override public Map getAttributeValues() { return attributeValues; } @Override public boolean isPartialUpdate() { return partialUpdate; } @Override public Class getModelClass() { return modelClass; } @Override public DynamoDBMapperConfig getMapperConfig() { return mapperConfig; } @Override public String getTableName() { return tableName; } @Override public String getHashKeyName() { if (hashKeyName == null) { hashKeyName = model.hashKey().name(); } return hashKeyName; } @Override public String getRangeKeyName() { if (rangeKeyName == null) { if (model.rangeKeyIfExists() == null) { rangeKeyName = NO_RANGE_KEY; } else { rangeKeyName = model.rangeKey().name(); } } if (rangeKeyName == NO_RANGE_KEY) { return null; } return rangeKeyName; } } private Map untransformAttributes( final AttributeTransformer.Parameters parameters ) { if (transformer != null) { return transformer.untransform(parameters); } else { return parameters.getAttributeValues(); } } private Map transformAttributes( final AttributeTransformer.Parameters parameters) { if (transformer != null) { return transformer.transform(parameters); } else { return parameters.getAttributeValues(); } } private Map transformAttributeUpdates( final Class clazz, final String tableName, final Map keys, final Map updateValues, final DynamoDBMapperConfig config ) { Map item = convertToItem(updateValues); HashSet keysAdded = new HashSet(); for (Map.Entry e : keys.entrySet()) { if (!item.containsKey(e.getKey())) { keysAdded.add(e.getKey()); item.put(e.getKey(), e.getValue()); } } AttributeTransformer.Parameters parameters = toParameters(item, true, clazz, tableName, config); String hashKey = parameters.getHashKeyName(); if (!item.containsKey(hashKey)) { item.put(hashKey, keys.get(hashKey)); } item = transformAttributes(parameters); for(Map.Entry entry: item.entrySet()) { if (keysAdded.contains(entry.getKey())) { // This was added in for context before calling // transformAttributes, but isn't actually being changed. continue; } AttributeValueUpdate update = updateValues.get(entry.getKey()); if (update != null) { StandardAttributeTypes.AttributeType.copyAll(entry.getValue(), update.getValue()); } else { updateValues.put(entry.getKey(), new AttributeValueUpdate(entry.getValue(), "PUT")); } } return updateValues; } private void pause(long delay) { if (delay <= 0) { return; } try { Thread.sleep(delay); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new AmazonClientException(e.getMessage(), e); } } /** * Returns a new map object that merges the two sets of expected value * conditions (user-specified or imposed by the internal implementation of * DynamoDBMapper). Internal assertion on an attribute will be overridden by * any user-specified condition on the same attribute. *

* Exception is thrown if the two sets of conditions cannot be combined * together. */ private static Map mergeExpectedAttributeValueConditions( Map internalAssertions, Map userProvidedConditions, String userProvidedConditionOperator) { // If any of the condition map is null, simply return a copy of the other one. if ((internalAssertions == null || internalAssertions.isEmpty()) && (userProvidedConditions == null || userProvidedConditions.isEmpty())) { return null; } else if (internalAssertions == null) { return new HashMap(userProvidedConditions); } else if (userProvidedConditions == null) { return new HashMap(internalAssertions); } // Start from a copy of the internal conditions Map mergedExpectedValues = new HashMap(internalAssertions); // Remove internal conditions that are going to be overlaid by user-provided ones. for (String attrName : userProvidedConditions.keySet()) { mergedExpectedValues.remove(attrName); } // All the generated internal conditions must be joined by AND. // Throw an exception if the user specifies an OR operator, and that the // internal conditions are not totally overlaid by the user-provided // ones. if ( ConditionalOperator.OR.toString().equals(userProvidedConditionOperator) && !mergedExpectedValues.isEmpty() ) { throw new IllegalArgumentException("Unable to assert the value of the fields " + mergedExpectedValues.keySet() + ", since the expected value conditions cannot be combined " + "with user-specified conditions joined by \"OR\". You can use SaveBehavior.CLOBBER to " + "skip the assertion on these fields."); } mergedExpectedValues.putAll(userProvidedConditions); return mergedExpectedValues; } static X applyUserAgent(X request) { request.getRequestClientOptions().appendUserAgent(USER_AGENT); return request; } static X applyBatchOperationUserAgent(X request) { request.getRequestClientOptions().appendUserAgent(USER_AGENT_BATCH_OPERATION); return request; } /** * The return type of batchWrite, batchDelete and batchSave. It contains the information about the unprocessed items * and the exception causing the failure. * */ public static class FailedBatch { private Map> unprocessedItems; private Exception exception; public void setUnprocessedItems(Map> unprocessedItems) { this.unprocessedItems = unprocessedItems; } public Map> getUnprocessedItems() { return unprocessedItems; } public void setException(Exception excetpion) { this.exception = excetpion; } public Exception getException() { return exception; } } @Override public S3ClientCache getS3ClientCache() { return s3cc; } @Override public S3Link createS3Link(String bucketName, String key) { return createS3Link(null, bucketName, key); } @Override public S3Link createS3Link(Region s3region, String bucketName, String key) { if ( s3cc == null ) { throw new IllegalStateException("Mapper must be constructed with S3 AWS Credentials to create S3Link"); } return new S3Link(s3cc, s3region, bucketName , key); } @Override public CreateTableRequest generateCreateTableRequest(Class clazz) { final DynamoDBMapperTableModel model = getTableModel((Class)clazz, config); CreateTableRequest createTableRequest = new CreateTableRequest(); createTableRequest.setTableName(internalGetTableName(clazz, null, config)); createTableRequest.withKeySchema(new KeySchemaElement(model.hashKey().name(), HASH)); if (model.rangeKeyIfExists() != null) { createTableRequest.withKeySchema(new KeySchemaElement(model.rangeKey().name(), RANGE)); } final Collection gsis = model.globalSecondaryIndexes(); if (gsis.isEmpty() == false) { createTableRequest.setGlobalSecondaryIndexes(gsis); } final Collection lsis = model.localSecondaryIndexes(); if (lsis.isEmpty() == false ) { createTableRequest.setLocalSecondaryIndexes(lsis); } for (final DynamoDBMapperFieldModel field : model.fields()) { if (field.anyKey()) { createTableRequest.withAttributeDefinitions(field.definition()); } } return createTableRequest; } @Override public DeleteTableRequest generateDeleteTableRequest(Class clazz) { DeleteTableRequest deleteTableRequest = new DeleteTableRequest(); deleteTableRequest.setTableName(internalGetTableName(clazz, null, config)); return deleteTableRequest; } /** * Creates a new table mapper using this mapper to perform operations. * @param The object type which this mapper operates. * @param The hash key value type. * @param The range key value type; use ? if no range key. * @param clazz The object class. * @return The table mapper. */ public DynamoDBTableMapper newTableMapper(Class clazz) { return new DynamoDBTableMapper(getTableModel(clazz, config), this, this.db); } }