All Downloads are FREE. Search and download functionalities are using the official Maven repository.

com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapper Maven / Gradle / Ivy

Go to download

The Amazon Web Services SDK for Java provides Java APIs for building software on AWS' cost-effective, scalable, and reliable infrastructure products. The AWS Java SDK allows developers to code against APIs for all of Amazon's infrastructure web services (Amazon S3, Amazon EC2, Amazon SQS, Amazon Relational Database Service, Amazon AutoScaling, etc).

The newest version!
/*
 * Copyright 2011-2014 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.services.dynamodbv2.datamodeling;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.retry.RetryUtils;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.ConsistentReads;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.PaginationLoadingStrategy;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig.SaveBehavior;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBTableSchemaParser.TableIndexesInfo;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemResult;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult;
import com.amazonaws.services.dynamodbv2.model.ComparisonOperator;
import com.amazonaws.services.dynamodbv2.model.Condition;
import com.amazonaws.services.dynamodbv2.model.ConditionalCheckFailedException;
import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteItemRequest;
import com.amazonaws.services.dynamodbv2.model.DeleteRequest;
import com.amazonaws.services.dynamodbv2.model.ExpectedAttributeValue;
import com.amazonaws.services.dynamodbv2.model.GetItemRequest;
import com.amazonaws.services.dynamodbv2.model.GetItemResult;
import com.amazonaws.services.dynamodbv2.model.KeysAndAttributes;
import com.amazonaws.services.dynamodbv2.model.PutItemRequest;
import com.amazonaws.services.dynamodbv2.model.PutRequest;
import com.amazonaws.services.dynamodbv2.model.QueryRequest;
import com.amazonaws.services.dynamodbv2.model.QueryResult;
import com.amazonaws.services.dynamodbv2.model.ScanRequest;
import com.amazonaws.services.dynamodbv2.model.ScanResult;
import com.amazonaws.services.dynamodbv2.model.Select;
import com.amazonaws.services.dynamodbv2.model.UpdateItemRequest;
import com.amazonaws.services.dynamodbv2.model.WriteRequest;
import com.amazonaws.services.s3.model.Region;
import com.amazonaws.util.VersionInfoUtils;

/**
 * Object mapper for domain-object interaction with DynamoDB.
 * 

 * <p>
 * To use, define a domain class that represents an item in a DynamoDB table and
 * annotate it with the annotations found in the
 * com.amazonaws.services.dynamodbv2.datamodeling package. In order to allow the
 * mapper to correctly persist the data, each modeled property in the domain
 * class should be accessible via getter and setter methods, and each property
 * annotation should be either applied to the getter method or the class field.
 * A minimal example using getter annotations:
 * <pre class="brush: java">

 * @DynamoDBTable(tableName = "TestTable")
 * public class TestClass {
 *
 *     private Long key;
 *     private double rangeKey;
 *     private Long version;
 *
 *     private Set<Integer> integerSetAttribute;
 *
 *     @DynamoDBHashKey
 *     public Long getKey() {
 *         return key;
 *     }
 *
 *     public void setKey(Long key) {
 *         this.key = key;
 *     }
 *
 *     @DynamoDBRangeKey
 *     public double getRangeKey() {
 *         return rangeKey;
 *     }
 *
 *     public void setRangeKey(double rangeKey) {
 *         this.rangeKey = rangeKey;
 *     }
 *
 *     @DynamoDBAttribute(attributeName = "integerSetAttribute")
 *     public Set<Integer> getIntegerAttribute() {
 *         return integerSetAttribute;
 *     }
 *
 *     public void setIntegerAttribute(Set<Integer> integerAttribute) {
 *         this.integerSetAttribute = integerAttribute;
 *     }
 *
 *     @DynamoDBVersionAttribute
 *     public Long getVersion() {
 *         return version;
 *     }
 *
 *     public void setVersion(Long version) {
 *         this.version = version;
 *     }
 * }
 * 
*

 * <p>
 * Save instances of annotated classes to DynamoDB, retrieve them, and delete
 * them using the {@link DynamoDBMapper} class, as in the following example.
 * <pre class="brush: java">

 * DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
 * Long hashKey = 105L;
 * double rangeKey = 1.0d;
 * TestClass obj = mapper.load(TestClass.class, hashKey, rangeKey);
 * obj.getIntegerAttribute().add(42);
 * mapper.save(obj);
 * mapper.delete(obj);
 * 
*

 * <p>
 * When using the save, load, and delete methods, {@link DynamoDBMapper} will
 * throw {@link DynamoDBMappingException}s to indicate that domain classes are
 * incorrectly annotated or otherwise incompatible with this class. Service
 * exceptions will always be propagated as {@link AmazonClientException}, and
 * DynamoDB-specific subclasses such as {@link ConditionalCheckFailedException}
 * will be used when possible.
 * <p>

* This class is thread-safe and can be shared between threads. It's also very * lightweight, so it doesn't need to be. * * @see DynamoDBTable * @see DynamoDBHashKey * @see DynamoDBRangeKey * @see DynamoDBAutoGeneratedKey * @see DynamoDBAttribute * @see DynamoDBVersionAttribute * @see DynamoDBIgnore * @see DynamoDBMarshalling * @see DynamoDBMapperConfig */ public class DynamoDBMapper { private final S3ClientCache s3cc; private final AmazonDynamoDB db; private final DynamoDBMapperConfig config; private final DynamoDBReflector reflector = new DynamoDBReflector(); private final DynamoDBTableSchemaParser schemaParser = new DynamoDBTableSchemaParser(); private final AttributeTransformer transformer; /** The max back off time for batch write */ static final long MAX_BACKOFF_IN_MILLISECONDS = 1000 * 3; /** * This retry count is applicable only when every batch get item request * results in no data retrieved from server and the un processed keys is * same as request items */ static final int BATCH_GET_MAX_RETRY_COUNT_ALL_KEYS = 5; /** * User agent for requests made using the {@link DynamoDBMapper}. */ private static final String USER_AGENT = DynamoDBMapper.class.getName() + "/" + VersionInfoUtils.getVersion(); private static final String NO_RANGE_KEY = new String(); private static final Log log = LogFactory.getLog(DynamoDBMapper.class); /** * Constructs a new mapper with the service object given, using the default * configuration. * * @param dynamoDB * The service object to use for all service calls. * @see DynamoDBMapperConfig#DEFAULT */ public DynamoDBMapper(final AmazonDynamoDB dynamoDB) { this(dynamoDB, DynamoDBMapperConfig.DEFAULT, null, null); } /** * Constructs a new mapper with the service object and configuration given. * * @param dynamoDB * The service object to use for all service calls. * @param config * The default configuration to use for all service calls. It can * be overridden on a per-operation basis. 
*/ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config) { this(dynamoDB, config, null, null); } /** * Constructs a new mapper with the service object and S3 client cache * given, using the default configuration. * * @param ddb * The service object to use for all service calls. * @param s3CredentialProvider * The credentials provider for accessing S3. * Relevant only if {@link S3Link} is involved. * @see DynamoDBMapperConfig#DEFAULT */ public DynamoDBMapper( final AmazonDynamoDB ddb, final AWSCredentialsProvider s3CredentialProvider) { this(ddb, DynamoDBMapperConfig.DEFAULT, s3CredentialProvider); } /** * Constructs a new mapper with the given service object, configuration, * and transform hook. * * @param dynamoDB * the service object to use for all service calls * @param config * the default configuration to use for all service calls. It * can be overridden on a per-operation basis * @param transformer * The custom attribute transformer to invoke when serializing or * deserializing an object. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config, final AttributeTransformer transformer) { this(dynamoDB, config, transformer, null); } /** * Constructs a new mapper with the service object, configuration, and S3 * client cache given. * * @param dynamoDB * The service object to use for all service calls. * @param config * The default configuration to use for all service calls. It can * be overridden on a per-operation basis. * @param s3CredentialProvider * The credentials provider for accessing S3. * Relevant only if {@link S3Link} is involved. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config, final AWSCredentialsProvider s3CredentialProvider) { this(dynamoDB, config, null, validate(s3CredentialProvider)); } /** * Throws an exception if the given credentials provider is {@code null}. 
*/ private static AWSCredentialsProvider validate( final AWSCredentialsProvider provider) { if (provider == null) { throw new IllegalArgumentException( "s3 credentials provider must not be null"); } return provider; } /** * Constructor with all parameters. * * @param dynamoDB * The service object to use for all service calls. * @param config * The default configuration to use for all service calls. It can * be overridden on a per-operation basis. * @param transformer * The custom attribute transformer to invoke when serializing or * deserializing an object. * @param s3CredentialProvider * The credentials provider for accessing S3. * Relevant only if {@link S3Link} is involved. */ public DynamoDBMapper( final AmazonDynamoDB dynamoDB, final DynamoDBMapperConfig config, final AttributeTransformer transformer, final AWSCredentialsProvider s3CredentialsProvider) { this.db = dynamoDB; this.config = config; this.transformer = transformer; if (s3CredentialsProvider == null) { this.s3cc = null; } else { this.s3cc = new S3ClientCache(s3CredentialsProvider.getCredentials()); } } /** * Loads an object with the hash key given and a configuration override. * This configuration overrides the default provided at object construction. * * @see DynamoDBMapper#load(Class, Object, Object, DynamoDBMapperConfig) */ public T load(Class clazz, Object hashKey, DynamoDBMapperConfig config) { return load(clazz, hashKey, null, config); } /** * Loads an object with the hash key given, using the default configuration. * * @see DynamoDBMapper#load(Class, Object, Object, DynamoDBMapperConfig) */ public T load(Class clazz, Object hashKey) { return load(clazz, hashKey, null, config); } /** * Loads an object with a hash and range key, using the default * configuration. 
* * @see DynamoDBMapper#load(Class, Object, Object, DynamoDBMapperConfig) */ public T load(Class clazz, Object hashKey, Object rangeKey) { return load(clazz, hashKey, rangeKey, config); } /** * Returns an object whose keys match those of the prototype key object given, * or null if no such item exists. * * @param keyObject * An object of the class to load with the keys values to match. * * @see DynamoDBMapper#load(Object, DynamoDBMapperConfig) */ public T load(T keyObject) { return load(keyObject, this.config); } /** * Returns an object whose keys match those of the prototype key object given, * or null if no such item exists. * * @param keyObject * An object of the class to load with the keys values to match. * @param config * Configuration for the service call to retrieve the object from * DynamoDB. This configuration overrides the default given at * construction. */ public T load(T keyObject, DynamoDBMapperConfig config) { @SuppressWarnings("unchecked") Class clazz = (Class) keyObject.getClass(); config = mergeConfig(config); String tableName = getTableName(clazz, config); GetItemRequest rq = new GetItemRequest() .withRequestMetricCollector(config.getRequestMetricCollector()); Map key = getKey(keyObject, clazz); rq.setKey(key); rq.setTableName(tableName); rq.setConsistentRead(config.getConsistentReads() == ConsistentReads.CONSISTENT); GetItemResult item = db.getItem(applyUserAgent(rq)); Map itemAttributes = item.getItem(); if ( itemAttributes == null ) { return null; } T object = marshalIntoObject(toParameters(itemAttributes, clazz, config)); return object; } /** * Returns a key map for the key object given. * * @param keyObject * The key object, corresponding to an item in a dynamo table. 
*/ @SuppressWarnings("unchecked") private Map getKey(T keyObject) { return getKey(keyObject, (Class)keyObject.getClass()); } private Map getKey(T keyObject, Class clazz) { Map key = new HashMap(); for (Method keyGetter : reflector.getPrimaryKeyGetters(clazz)) { Object getterResult = safeInvoke(keyGetter, keyObject); AttributeValue keyAttributeValue = getSimpleAttributeValue(keyGetter, getterResult); if (keyAttributeValue == null) { throw new DynamoDBMappingException("Null key found for " + keyGetter); } key.put(reflector.getAttributeName(keyGetter), keyAttributeValue); } if ( key.isEmpty() ) { throw new DynamoDBMappingException("Class must be annotated with " + DynamoDBHashKey.class + " and " + DynamoDBRangeKey.class); } return key; } /** * Returns an object with the given hash key, or null if no such object * exists. * * @param clazz * The class to load, corresponding to a DynamoDB table. * @param hashKey * The key of the object. * @param rangeKey * The range key of the object, or null for tables without a * range key. * @param config * Configuration for the service call to retrieve the object from * DynamoDB. This configuration overrides the default given at * construction. */ public T load(Class clazz, Object hashKey, Object rangeKey, DynamoDBMapperConfig config) { config = mergeConfig(config); T keyObject = createKeyObject(clazz, hashKey, rangeKey); return load(keyObject, config); } /** * Creates a key prototype object for the class given with the single hash and range key given. 
*/ private T createKeyObject(Class clazz, Object hashKey, Object rangeKey) { T keyObject = null; try { keyObject = clazz.newInstance(); } catch ( Exception e ) { throw new DynamoDBMappingException("Failed to instantiate class", e); } boolean seenHashKey = false; boolean seenRangeKey = false; for ( Method getter : reflector.getPrimaryKeyGetters(clazz) ) { if ( ReflectionUtils.getterOrFieldHasAnnotation(getter, DynamoDBHashKey.class) ) { if ( seenHashKey ) { throw new DynamoDBMappingException("Found more than one method annotated with " + DynamoDBHashKey.class + " for class " + clazz + ". Use load(Object) for tables with more than a single hash and range key."); } seenHashKey = true; safeInvoke(reflector.getSetter(getter), keyObject, hashKey); } else if ( ReflectionUtils.getterOrFieldHasAnnotation(getter, DynamoDBRangeKey.class) ) { if ( seenRangeKey ) { throw new DynamoDBMappingException("Found more than one method annotated with " + DynamoDBRangeKey.class + " for class " + clazz + ". Use load(Object) for tables with more than a single hash and range key."); } seenRangeKey = true; safeInvoke(reflector.getSetter(getter), keyObject, rangeKey); } } if ( !seenHashKey ) { throw new DynamoDBMappingException("No method annotated with " + DynamoDBHashKey.class + " for class " + clazz + "."); } else if ( rangeKey != null && !seenRangeKey ) { throw new DynamoDBMappingException("No method annotated with " + DynamoDBRangeKey.class + " for class " + clazz + "."); } return keyObject; } /** * Returns a map of attribute name to EQ condition for the key prototype * object given. This method considers attributes annotated with either * {@link DynamoDBHashKey} or {@link DynamoDBIndexHashKey}. * * @param obj * The prototype object that includes the hash key value. * @return A map of hash key attribute name to EQ condition for the key * prototype object, or an empty map if obj is null. 
*/ private Map getHashKeyEqualsConditions(Object obj) { Map conditions = new HashMap(); if (obj != null) { for ( Method getter : reflector.getRelevantGetters(obj.getClass()) ) { if ( ReflectionUtils.getterOrFieldHasAnnotation(getter, DynamoDBHashKey.class) || ReflectionUtils.getterOrFieldHasAnnotation(getter, DynamoDBIndexHashKey.class) ) { Object getterReturnResult = safeInvoke(getter, obj, (Object[])null); if (getterReturnResult != null) { conditions.put( reflector.getAttributeName(getter), new Condition().withComparisonOperator(ComparisonOperator.EQ).withAttributeValueList( getSimpleAttributeValue(getter, getterReturnResult))); } } } } return conditions; } /** * Returns the table name for the class given. */ protected final String getTableName(final Class clazz, final DynamoDBMapperConfig config) { return getTableName(clazz, config, reflector); } static String getTableName(final Class clazz, final DynamoDBMapperConfig config, final DynamoDBReflector reflector) { DynamoDBTable table = reflector.getTable(clazz); String tableName = table.tableName(); if ( config.getTableNameOverride() != null ) { if ( config.getTableNameOverride().getTableName() != null ) { tableName = config.getTableNameOverride().getTableName(); } else { tableName = config.getTableNameOverride().getTableNamePrefix() + tableName; } } return tableName; } /** * A replacement for {@link #marshallIntoObject(Class, Map)} that takes * extra parameters to tunnel through to {@code privateMarshalIntoObject}. *

* Once {@code marshallIntoObject} is removed, this method will directly * call {@code privateMarshalIntoObject}. */ private T marshalIntoObject( final AttributeTransformer.Parameters parameters ) { return marshallIntoObject( parameters.getModelClass(), MapAnd.wrap(parameters.getAttributeValues(), parameters)); } /** * Creates and fills in the attributes on an instance of the class given * with the attributes given. *

* This is accomplished by looking for getter methods annotated with an * appropriate annotation, then looking for matching attribute names in the * item attribute map. *

* This method has been marked deprecated because it does not allow * load/query/scan to pass through their DynamoDBMapperConfig parameter, * which is needed by some implementations of {@code AttributeTransformer}. * In a future version of the SDK, load/query/scan will be changed to * directly call privateMarshalIntoObject, and will no longer call this * method. *

* If you are extending DynamoDBMapper and overriding this method to * customize how the mapper unmarshals POJOs from a raw DynamoDB item, * please switch to using an AttributeTransformer (or open a GitHub * issue if you need to fully control the unmarshalling process, and we'll * figure out a better way to expose such a hook). *

* If you're simply calling this method, it will continue to be available * for the forseeable future - feel free to ignore the @Deprecated tag. * * @param clazz * The class to instantiate and hydrate * @param itemAttributes * The set of item attributes, keyed by attribute name. * @deprecated as an extension point for adding custom unmarshalling */ @Deprecated public T marshallIntoObject(Class clazz, Map itemAttributes) { if (itemAttributes instanceof MapAnd) { @SuppressWarnings("unchecked") AttributeTransformer.Parameters parameters = ((MapAnd>) itemAttributes) .getExtra(); return privateMarshalIntoObject(parameters); } else { // Called via some unexpected external codepath; use the class-level // config. return privateMarshalIntoObject( toParameters(itemAttributes, clazz, this.config)); } } /** * The one true implementation of marshalIntoObject. */ private T privateMarshalIntoObject( final AttributeTransformer.Parameters parameters) { T toReturn = null; try { toReturn = parameters.getModelClass().newInstance(); } catch ( InstantiationException e ) { throw new DynamoDBMappingException("Failed to instantiate new instance of class", e); } catch ( IllegalAccessException e ) { throw new DynamoDBMappingException("Failed to instantiate new instance of class", e); } if ( parameters.getAttributeValues() == null || parameters.getAttributeValues().isEmpty() ) { return toReturn; } Map result = untransformAttributes(parameters); for ( Method m : reflector.getRelevantGetters(parameters.getModelClass()) ) { String attributeName = reflector.getAttributeName(m); if ( result.containsKey(attributeName) ) { setValue(toReturn, m, result.get(attributeName)); } } return toReturn; } /** * Unmarshalls the list of item attributes into objects of type clazz. *

* This method has been marked deprecated because it does not allow * query/scan to pass through their DynamoDBMapperConfig parameter, * which is needed by some implementations of {@code AttributeTransformer}. * In a future version of the SDK, query/scan will be changed to directly * call privateMarshalIntoObjects, and will no longer call this method. *

* If you are extending DynamoDBMapper and overriding this method to * customize how the mapper unmarshals POJOs from raw DynamoDB items, * please switch to using an AttributeTransformer (or open a GitHub * issue if you need to fully control the unmarshalling process, and we'll * figure out a better way to expose such a hook). *

* If you're simply calling this method, it will continue to be available * for the forseeable future - feel free to ignore the @Deprecated tag. * * @see DynamoDBMapper#marshallIntoObject(Class, Map) * @deprecated as an extension point for adding custom unmarshalling */ @Deprecated public List marshallIntoObjects(Class clazz, List> itemAttributes) { List result = new ArrayList(itemAttributes.size()); for (Map item : itemAttributes) { result.add(marshallIntoObject(clazz, item)); } return result; } /** * A replacement for {@link #marshallIntoObjects(Class, List)} that takes * an extra set of parameters to be tunneled through to * {@code privateMarshalIntoObject} (if nothing along the way is * overridden). It's package-private because some of the Paginated*List * classes call back into it, but final because no one, even in this * package, should ever override it. *

* In the future, when the deprecated {@code marshallIntoObjects} is * removed, this method will be changed to directly call * {@code privateMarshalIntoObject}. */ final List marshalIntoObjects( final List> parameters ) { if (parameters.isEmpty()) { return Collections.emptyList(); } Class clazz = parameters.get(0).getModelClass(); List> list = new ArrayList>(parameters.size()); for (AttributeTransformer.Parameters entry : parameters) { list.add(MapAnd.wrap(entry.getAttributeValues(), entry)); } return marshallIntoObjects(clazz, list); } /** * Sets the value in the return object corresponding to the service result. */ private void setValue(final T toReturn, final Method getter, AttributeValue value) { Method setter = reflector.getSetter(getter); ArgumentUnmarshaller unmarhsaller = reflector.getArgumentUnmarshaller(toReturn, getter, setter, s3cc); unmarhsaller.typeCheck(value, setter); Object argument; try { argument = unmarhsaller.unmarshall(value); } catch ( IllegalArgumentException e ) { throw new DynamoDBMappingException("Couldn't unmarshall value " + value + " for " + setter, e); } catch ( ParseException e ) { throw new DynamoDBMappingException("Error attempting to parse date string " + value + " for "+ setter, e); } safeInvoke(setter, toReturn, argument); } /** * Returns an {@link AttributeValue} corresponding to the getter and return * result given, treating it as a non-versioned attribute. */ private AttributeValue getSimpleAttributeValue(final Method getter, final Object getterReturnResult) { if ( getterReturnResult == null ) return null; ArgumentMarshaller marshaller = reflector.getArgumentMarshaller(getter); return marshaller.marshall(getterReturnResult); } /** * Saves the object given into DynamoDB, using the default configuration. 
* * @see DynamoDBMapper#save(Object, DynamoDBSaveExpression, DynamoDBMapperConfig) */ public void save(T object) { save(object, null, config); } /** * Saves the object given into DynamoDB, using the default configuration and the specified saveExpression. * * @see DynamoDBMapper#save(Object, DynamoDBSaveExpression, DynamoDBMapperConfig) */ public void save(T object, DynamoDBSaveExpression saveExpression) { save(object, saveExpression, config); } private boolean needAutoGenerateAssignableKey(Class clazz, Object object) { Collection keyGetters = reflector.getPrimaryKeyGetters(clazz); boolean forcePut = false; /* * Determine if there are any auto-assigned keys to assign. If so, force * a put and assign the keys. */ boolean hashKeyGetterFound = false; for ( Method method : keyGetters ) { Object getterResult = safeInvoke(method, object); if ( getterResult == null && reflector.isAssignableKey(method) ) { forcePut = true; } if ( ReflectionUtils.getterOrFieldHasAnnotation(method, DynamoDBHashKey.class) ) { hashKeyGetterFound = true; } } if ( !hashKeyGetterFound ) { throw new DynamoDBMappingException("No " + DynamoDBHashKey.class + " annotation found in class " + clazz); } return forcePut; } /** * Saves the object given into DynamoDB, using the specified configuration. * * @see DynamoDBMapper#save(Object, DynamoDBSaveExpression, DynamoDBMapperConfig) */ public void save(T object, DynamoDBMapperConfig config) { save(object, null, config); } /** * Saves an item in DynamoDB. The service method used is determined by the * {@link DynamoDBMapperConfig#getSaveBehavior()} value, to use either * {@link AmazonDynamoDB#putItem(PutItemRequest)} or * {@link AmazonDynamoDB#updateItem(UpdateItemRequest)}: *

    *
  • UPDATE (default) : UPDATE will not affect unmodeled attributes * on a save operation and a null value for the modeled attribute will * remove it from that item in DynamoDB. Because of the limitation of * updateItem request, the implementation of UPDATE will send a putItem * request when a key-only object is being saved, and it will send another * updateItem request if the given key(s) already exists in the table.
  • *
  • UPDATE_SKIP_NULL_ATTRIBUTES : Similar to UPDATE except that it * ignores any null value attribute(s) and will NOT remove them from that * item in DynamoDB. It also guarantees to send only one single updateItem * request, no matter the object is key-only or not.
  • *
  • CLOBBER : CLOBBER will clear and replace all attributes, * included unmodeled ones, (delete and recreate) on save. Versioned field * constraints will also be disregarded.
  • *
* * * Any options specified in the saveExpression parameter will be overlaid on * any constraints due to versioned attributes. * * @param object * The object to save into DynamoDB * @param saveExpression * The options to apply to this save request * @param config * The configuration to use, which overrides the default provided * at object construction. * * @see DynamoDBMapperConfig.SaveBehavior */ public void save(T object, DynamoDBSaveExpression saveExpression, final DynamoDBMapperConfig config) { final DynamoDBMapperConfig finalConfig = mergeConfig(config); @SuppressWarnings("unchecked") Class clazz = (Class) object.getClass(); String tableName = getTableName(clazz, finalConfig); final Map userProvidedExpectedValues = (saveExpression == null) ? null : saveExpression.getExpected(); /* * We force a putItem request instead of updateItem request either when * CLOBBER is configured, or part of the primary key of the object needs * to be auto-generated. */ boolean forcePut = (finalConfig.getSaveBehavior() == SaveBehavior.CLOBBER) || needAutoGenerateAssignableKey(clazz, object); SaveObjectHandler saveObjectHandler; if (forcePut) { saveObjectHandler = this.new SaveObjectHandler(clazz, object, tableName, finalConfig.getSaveBehavior(), userProvidedExpectedValues) { @Override protected void onKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue) { /* Treat key values as common attribute value updates. */ getAttributeValueUpdates().put(attributeName, new AttributeValueUpdate().withValue(keyAttributeValue) .withAction("PUT")); } /* Use default implementation of onNonKeyAttribute(...) */ @Override protected void onNullNonKeyAttribute(String attributeName) { /* When doing a force put, we can safely ignore the null-valued attributes. 
*/ return; } @Override protected void executeLowLevelRequest(boolean onlyKeyAttributeSpecified) { /* Send a putItem request */ Map attributeValues = convertToItem(getAttributeValueUpdates()); attributeValues = transformAttributes( toParameters(attributeValues, this.clazz, finalConfig)); PutItemRequest req = new PutItemRequest() .withTableName(getTableName()) .withItem(attributeValues) .withExpected(getExpectedAttributeValues()) .withRequestMetricCollector( finalConfig.getRequestMetricCollector()); db.putItem(applyUserAgent(req)); } }; } else { saveObjectHandler = this.new SaveObjectHandler(clazz, object, tableName, finalConfig.getSaveBehavior(), userProvidedExpectedValues) { @Override protected void onKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue) { /* Put it in the key collection which is later used in the updateItem request. */ getKeyAttributeValues().put(attributeName, keyAttributeValue); } @Override protected void onNonKeyAttribute(String attributeName, AttributeValue currentValue) { /* If it's a set attribute and the mapper is configured with APPEND_SET, * we do an "ADD" update instead of the default "PUT". */ if (getLocalSaveBehavior() == SaveBehavior.APPEND_SET) { if (currentValue.getBS() != null || currentValue.getNS() != null || currentValue.getSS() != null) { getAttributeValueUpdates().put( attributeName, new AttributeValueUpdate().withValue( currentValue).withAction("ADD")); return; } } /* Otherwise, we do the default "PUT" update. */ super.onNonKeyAttribute(attributeName, currentValue); } @Override protected void onNullNonKeyAttribute(String attributeName) { /* * If UPDATE_SKIP_NULL_ATTRIBUTES or APPEND_SET is * configured, we don't delete null value attributes. */ if (getLocalSaveBehavior() == SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES || getLocalSaveBehavior() == SaveBehavior.APPEND_SET) { return; } else { /* Delete attributes that are set as null in the object. 
*/ getAttributeValueUpdates() .put(attributeName, new AttributeValueUpdate() .withAction("DELETE")); } } @Override protected void executeLowLevelRequest(boolean onlyKeyAttributeSpecified) { /* * Do a putItem when a key-only object is being saved with * UPDATE configuration. * Here we only need to consider UPDATE configuration, since * only UPDATE could cause the problematic situation of * updating an existing primary key with "DELETE" action on * non-key attributes. See the javadoc of keyOnlyPut(...) * for more detail. */ boolean doUpdateItem = true; if (onlyKeyAttributeSpecified && getLocalSaveBehavior() == SaveBehavior.UPDATE) { doUpdateItem = false; try { keyOnlyPut(this.clazz, this.object, getTableName(), reflector.getPrimaryHashKeyGetter(this.clazz), reflector.getPrimaryRangeKeyGetter(this.clazz), userProvidedExpectedValues, finalConfig); } catch (AmazonServiceException ase) { if (ase.getErrorCode().equals( "ConditionalCheckFailedException")) { /* * If another item with the given keys is found * in the table, we follow up an updateItem * request. */ doUpdateItem = true; } else { throw ase; } } } if ( doUpdateItem ) { /* Send an updateItem request. */ UpdateItemRequest req = new UpdateItemRequest() .withTableName(getTableName()) .withKey(getKeyAttributeValues()) .withAttributeUpdates( transformAttributeUpdates(this.clazz, getKeyAttributeValues(), getAttributeValueUpdates(), finalConfig)) .withExpected(getExpectedAttributeValues()) .withRequestMetricCollector( finalConfig.getRequestMetricCollector()); db.updateItem(applyUserAgent(req)); } } }; } saveObjectHandler.execute(); } /** * The handler for saving object using DynamoDBMapper. Caller should * implement the abstract methods to provide the expected behavior on each * scenario, and this handler will take care of all the other basic workflow * and common operations. 
*/ protected abstract class SaveObjectHandler { protected final Object object; protected final Class clazz; private String tableName; private SaveBehavior saveBehavior; private Map key; private Map updateValues; private Map expectedValues; private List inMemoryUpdates; private boolean nonKeyAttributePresent; /** * Constructs a handler for saving the specified model object. * * @param object The model object to be saved. * @param clazz The domain class of the object. * @param tableName The table name. * @param userProvidedExpectedValues Any expected values that should be applied to the save */ public SaveObjectHandler(Class clazz, Object object, String tableName, SaveBehavior saveBehavior, Map userProvidedExpectedValues) { this.clazz = clazz; this.object = object; this.tableName = tableName; this.saveBehavior = saveBehavior; updateValues = new HashMap(); expectedValues = new HashMap(); if(userProvidedExpectedValues != null){ expectedValues.putAll(userProvidedExpectedValues); } inMemoryUpdates = new LinkedList(); key = new HashMap(); nonKeyAttributePresent = false; } /** * The general workflow of a save operation. 
*/ public void execute() { Collection keyGetters = reflector.getPrimaryKeyGetters(clazz); /* * First handle keys */ for ( Method method : keyGetters ) { Object getterResult = safeInvoke(method, object); String attributeName = reflector.getAttributeName(method); if ( getterResult == null && reflector.isAssignableKey(method) ) { onAutoGenerateAssignableKey(method, attributeName); } else { AttributeValue newAttributeValue = getSimpleAttributeValue(method, getterResult); if ( newAttributeValue == null ) { throw new DynamoDBMappingException("Null or empty value for key: " + method); } onKeyAttributeValue(attributeName, newAttributeValue); } } /* * Next construct an update for every non-key property */ for ( Method method : reflector.getRelevantGetters(clazz) ) { // Skip any key methods, since they are handled separately if ( keyGetters.contains(method) ) continue; Object getterResult = safeInvoke(method, object); String attributeName = reflector.getAttributeName(method); /* * If this is a versioned field, update it */ if ( reflector.isVersionAttributeGetter(method) ) { onVersionAttribute(method, getterResult, attributeName); nonKeyAttributePresent = true; } /* * Otherwise apply the update value for this attribute. */ else { AttributeValue currentValue = getSimpleAttributeValue(method, getterResult); if ( currentValue != null ) { onNonKeyAttribute(attributeName, currentValue); nonKeyAttributePresent = true; } else { onNullNonKeyAttribute(attributeName); } } } /* * Execute the implementation of the low level request. */ executeLowLevelRequest(! nonKeyAttributePresent); /* * Finally, after the service call has succeeded, update the * in-memory object with new field values as appropriate. This * currently takes into account of auto-generated keys and versioned * attributes. */ for ( ValueUpdate update : inMemoryUpdates ) { update.apply(); } } /** * Implement this method to do the necessary operations when a key * attribute is set with some value. 
* * @param attributeName * The name of the key attribute. * @param keyAttributeValue * The AttributeValue of the key attribute as specified in * the object. */ protected abstract void onKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue); /** * Implement this method for necessary operations when a non-key * attribute is set a non-null value in the object. * The default implementation simply adds a "PUT" update for the given attribute. * * @param attributeName * The name of the non-key attribute. * @param currentValue * The updated value of the given attribute. */ protected void onNonKeyAttribute(String attributeName, AttributeValue currentValue) { updateValues.put(attributeName, new AttributeValueUpdate() .withValue(currentValue).withAction("PUT")); } /** * Implement this method for necessary operations when a non-key * attribute is set null in the object. * * @param attributeName * The name of the non-key attribute. */ protected abstract void onNullNonKeyAttribute(String attributeName); /** * Implement this method to send the low-level request that is necessary * to complete the save operation. * * @param onlyKeyAttributeSpecified * Whether the object to be saved is only specified with key * attributes. */ protected abstract void executeLowLevelRequest(boolean onlyKeyAttributeSpecified); /** Get the SaveBehavior used locally for this save operation. **/ protected SaveBehavior getLocalSaveBehavior() { return saveBehavior; } /** Get the table name **/ protected String getTableName() { return tableName; } /** Get the map of all the specified key of the saved object. **/ protected Map getKeyAttributeValues() { return key; } /** Get the map of AttributeValueUpdate on each modeled attribute. **/ protected Map getAttributeValueUpdates() { return updateValues; } /** Get the map of ExpectedAttributeValue on each modeled attribute. 
**/ protected Map getExpectedAttributeValues() { return expectedValues; } /** Get the list of all the necessary in-memory update on the object. **/ protected List getInMemoryUpdates() { return inMemoryUpdates; } private void onAutoGenerateAssignableKey(Method method, String attributeName) { AttributeValue newVersionValue = getAutoGeneratedKeyAttributeValue(method, null); updateValues.put(attributeName, new AttributeValueUpdate().withAction("PUT").withValue(newVersionValue)); inMemoryUpdates.add(new ValueUpdate(method, newVersionValue, object)); if ( getLocalSaveBehavior() != SaveBehavior.CLOBBER && !expectedValues.containsKey(attributeName)) { // Add an expect clause to make sure that the item // doesn't already exist, since it's supposed to be new ExpectedAttributeValue expected = new ExpectedAttributeValue(); expected.setExists(false); expectedValues.put(attributeName, expected); } } private void onVersionAttribute(Method method, Object getterResult, String attributeName) { if ( getLocalSaveBehavior() != SaveBehavior.CLOBBER && !expectedValues.containsKey(attributeName)) { // First establish the expected (current) value for the // update call ExpectedAttributeValue expected = new ExpectedAttributeValue(); // For new objects, insist that the value doesn't exist. // For existing ones, insist it has the old value. AttributeValue currentValue = getSimpleAttributeValue(method, getterResult); expected.setExists(currentValue != null); if ( currentValue != null ) { expected.setValue(currentValue); } expectedValues.put(attributeName, expected); } AttributeValue newVersionValue = getVersionAttributeValue(method, getterResult); updateValues .put(attributeName, new AttributeValueUpdate().withAction("PUT").withValue(newVersionValue)); inMemoryUpdates.add(new ValueUpdate(method, newVersionValue, object)); } } /** * Edge case to deal with the problem reported here: * https://forums.aws.amazon.com/thread.jspa?threadID=86798&tstart=25 *

* DynamoDB fails silently on updateItem request that *

    *
  • is specified with a primary key that does not exist in the table
  • *
  • and contains only non-"PUT" AttributeValueUpdate on any non-key * attribute.
  • *
*

* So we have to do a putItem when a key-only object is being saved with * UPDATE configuration. In order to make sure this putItem request won't * replace any existing item in the table, we also insist that an item with * the key(s) given doesn't already exist. This isn't perfect, but we shouldn't * be doing a putItem at all in this case, so it's the best we can do. * * @param config never null */ private void keyOnlyPut( Class clazz, Object object, String tableName, Method hashKeyGetter, Method rangeKeyGetter, Map userProvidedExpectedValues, DynamoDBMapperConfig config) { Map attributes = new HashMap(); Map expectedValues = new HashMap(); String hashKeyAttributeName = reflector.getAttributeName(hashKeyGetter); Object hashGetterResult = safeInvoke(hashKeyGetter, object); attributes.put(hashKeyAttributeName, getSimpleAttributeValue(hashKeyGetter, hashGetterResult)); expectedValues.put(hashKeyAttributeName, new ExpectedAttributeValue().withExists(false)); if (rangeKeyGetter != null) { String rangeKeyAttributeName = reflector.getAttributeName(rangeKeyGetter); Object rangeGetterResult = safeInvoke(rangeKeyGetter, object); attributes.put(rangeKeyAttributeName, getSimpleAttributeValue(rangeKeyGetter, rangeGetterResult)); expectedValues.put(rangeKeyAttributeName, new ExpectedAttributeValue().withExists(false)); } attributes = transformAttributes( toParameters(attributes, clazz, config)); //overlay any user provided expected values. if(userProvidedExpectedValues != null){ expectedValues.putAll(userProvidedExpectedValues); } PutItemRequest req = new PutItemRequest().withTableName(tableName) .withItem(attributes).withExpected(expectedValues) .withRequestMetricCollector(config.getRequestMetricCollector()); db.putItem(applyUserAgent(req)); } /** * Deletes the given object from its DynamoDB table using the default configuration. 
*/ public void delete(Object object) { delete(object, null, this.config); } /** * Deletes the given object from its DynamoDB table using the specified deleteExpression and default configuration. */ public void delete(Object object, DynamoDBDeleteExpression deleteExpression) { delete(object, deleteExpression, this.config); } /** * Deletes the given object from its DynamoDB table using the specified configuration. */ public void delete(Object object, DynamoDBMapperConfig config) { delete(object, null, config); } /** * Deletes the given object from its DynamoDB table using the provided deleteExpression and provided configuration. * Any options specified in the deleteExpression parameter will be overlaid on any constraints due to * versioned attributes. * @param deleteExpression * The options to apply to this delete request * @param config * Config override object. If {@link SaveBehavior#CLOBBER} is * supplied, version fields will not be considered when deleting * the object. */ public void delete(T object, DynamoDBDeleteExpression deleteExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); @SuppressWarnings("unchecked") Class clazz = (Class) object.getClass(); String tableName = getTableName(clazz, config); Map key = getKey(object, clazz); /* * If there is a version field, make sure we assert its value. If the * version field is null (only should happen in unusual circumstances), * pretend it doesn't have a version field after all. 
*/ Map expectedValues = new HashMap(); if ( config.getSaveBehavior() != SaveBehavior.CLOBBER ) { for ( Method method : reflector.getRelevantGetters(clazz) ) { if ( reflector.isVersionAttributeGetter(method) ) { Object getterResult = safeInvoke(method, object); String attributeName = reflector.getAttributeName(method); ExpectedAttributeValue expected = new ExpectedAttributeValue(); AttributeValue currentValue = getSimpleAttributeValue(method, getterResult); expected.setExists(currentValue != null); if ( currentValue != null ) expected.setValue(currentValue); expectedValues.put(attributeName, expected); break; } } } //Overlay any user provided expected values onto the generated ones if(deleteExpression != null && deleteExpression.getExpected() != null){ expectedValues.putAll(deleteExpression.getExpected()); } DeleteItemRequest req = applyUserAgent(new DeleteItemRequest() .withKey(key).withTableName(tableName) .withExpected(expectedValues)) .withRequestMetricCollector(config.getRequestMetricCollector()) ; db.deleteItem(req); } /** * Deletes the objects given using one or more calls to the * {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)} API. No * version checks are performed, as required by the API. * * @see DynamoDBMapper#batchWrite(List, List, DynamoDBMapperConfig) */ public List batchDelete(List objectsToDelete) { return batchWrite(Collections.emptyList(), objectsToDelete, this.config); } /** * Deletes the objects given using one or more calls to the * {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)} API. No * version checks are performed, as required by the API. * * @see DynamoDBMapper#batchWrite(List, List, DynamoDBMapperConfig) */ public List batchDelete(Object... objectsToDelete) { return batchWrite(Collections.emptyList(), Arrays.asList(objectsToDelete), this.config); } /** * Saves the objects given using one or more calls to the * {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)} API. 
* No version checks are performed, as required by the API.
     * <p>
     * This method ignores any SaveBehavior set on the mapper, and
     * always behaves as if SaveBehavior.CLOBBER was specified, as
     * the AmazonDynamoDB.batchWriteItem() request does not support updating
     * existing items.
     *
     * @see DynamoDBMapper#batchWrite(List, List, DynamoDBMapperConfig)
     */
    public List<FailedBatch> batchSave(List<? extends Object> objectsToSave) {
        return batchWrite(objectsToSave, Collections.emptyList(), this.config);
    }

    /**
     * Saves the objects given using one or more calls to the
     * {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)} API. No
     * version checks are performed, as required by the API.
     * <p>
     * This method ignores any SaveBehavior set on the mapper, and
     * always behaves as if SaveBehavior.CLOBBER was specified, as
     * the AmazonDynamoDB.batchWriteItem() request does not support updating
     * existing items.
     *
     * @see DynamoDBMapper#batchWrite(List, List, DynamoDBMapperConfig)
     */
    public List<FailedBatch> batchSave(Object... objectsToSave) {
        return batchWrite(Arrays.asList(objectsToSave), Collections.emptyList(), this.config);
    }

    /**
     * Saves and deletes the objects given using one or more calls to the
     * {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)} API. No
     * version checks are performed, as required by the API.
     * <p>
     * This method ignores any SaveBehavior set on the mapper, and
     * always behaves as if SaveBehavior.CLOBBER was specified, as
     * the AmazonDynamoDB.batchWriteItem() request does not support updating
     * existing items.
     *
     * @see DynamoDBMapper#batchWrite(List, List, DynamoDBMapperConfig)
     */
    public List<FailedBatch> batchWrite(List<? extends Object> objectsToWrite,
                                        List<? extends Object> objectsToDelete) {
        return batchWrite(objectsToWrite, objectsToDelete, this.config);
    }

    /**
     * Saves and deletes the objects given using one or more calls to the
     * {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)} API.
     *
     * @param objectsToWrite
     *            A list of objects to save to DynamoDB. No version checks
     *            are performed, as required by the
     *            {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)}
     *            API.
     * @param objectsToDelete
     *            A list of objects to delete from DynamoDB. No version
     *            checks are performed, as required by the
     *            {@link AmazonDynamoDB#batchWriteItem(BatchWriteItemRequest)}
     *            API.
     * @param config
     *            Only {@link DynamoDBMapperConfig#getTableNameOverride()} is
     *            considered; if specified, all objects in the two parameter
     *            lists will be considered to belong to the given table
     *            override. In particular, this method always acts as
     *            if SaveBehavior.CLOBBER was specified regardless of the
     *            value of the config parameter.
     * @return A list of failed batches which includes the unprocessed items and
     *         the exceptions causing the failure.
*/ public List batchWrite(List objectsToWrite, List objectsToDelete, DynamoDBMapperConfig config) { config = mergeConfig(config); List totalFailedBatches = new LinkedList(); HashMap> requestItems = new HashMap>(); List inMemoryUpdates = new LinkedList(); for ( Object toWrite : objectsToWrite ) { Class clazz = toWrite.getClass(); String tableName = getTableName(clazz, config); Map attributeValues = new HashMap(); // Look at every getter and construct a value object for it for ( Method method : reflector.getRelevantGetters(clazz) ) { Object getterResult = safeInvoke(method, toWrite); String attributeName = reflector.getAttributeName(method); AttributeValue currentValue = null; if ( getterResult == null && reflector.isAssignableKey(method) ) { currentValue = getAutoGeneratedKeyAttributeValue(method, getterResult); inMemoryUpdates.add(new ValueUpdate(method, currentValue, toWrite)); } else { currentValue = getSimpleAttributeValue(method, getterResult); } if ( currentValue != null ) { attributeValues.put(attributeName, currentValue); } } if ( !requestItems.containsKey(tableName) ) { requestItems.put(tableName, new LinkedList()); } AttributeTransformer.Parameters parameters = toParameters(attributeValues, clazz, config); requestItems.get(tableName).add( new WriteRequest().withPutRequest( new PutRequest().withItem( transformAttributes(parameters)))); } for ( Object toDelete : objectsToDelete ) { Class clazz = toDelete.getClass(); String tableName = getTableName(clazz, config); Map key = getKey(toDelete); if ( !requestItems.containsKey(tableName) ) { requestItems.put(tableName, new LinkedList()); } requestItems.get(tableName).add( new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(key))); } // Break into chunks of 25 items and make service requests to DynamoDB while ( !requestItems.isEmpty() ) { HashMap> batch = new HashMap>(); int i = 0; Iterator>> tableIter = requestItems.entrySet().iterator(); while ( tableIter.hasNext() && i < 25 ) { Entry> tableRequest = 
tableIter.next(); batch.put(tableRequest.getKey(), new LinkedList()); Iterator writeRequestIter = tableRequest.getValue().iterator(); while ( writeRequestIter.hasNext() && i++ < 25 ) { WriteRequest writeRequest = writeRequestIter.next(); batch.get(tableRequest.getKey()).add(writeRequest); writeRequestIter.remove(); } // If we've processed all the write requests for this table, // remove it from the parent iterator. if ( !writeRequestIter.hasNext() ) { tableIter.remove(); } } List failedBatches = writeOneBatch(batch); if (failedBatches != null) { totalFailedBatches.addAll(failedBatches); // If contains throttling exception, we do a backoff if (containsThrottlingException(failedBatches)) { try { Thread.sleep(1000 * 2); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new AmazonClientException(e.getMessage(), e); } } } } // Once the entire batch is processed, update assigned keys in memory for ( ValueUpdate update : inMemoryUpdates ) { update.apply(); } return totalFailedBatches; } /** * Process one batch of requests(max 25). It will divide the batch if * receives request too large exception(the total size of the request is beyond 1M). */ private List writeOneBatch(Map> batch) { List failedBatches = new LinkedList(); Map> firstHalfBatch = new HashMap>(); Map> secondHalfBatch = new HashMap>(); FailedBatch failedBatch = callUntilCompletion(batch); if (failedBatch != null) { // If the exception is request entity too large, we divide the batch // into smaller parts. if (failedBatch.getException() instanceof AmazonServiceException && RetryUtils.isRequestEntityTooLargeException((AmazonServiceException) failedBatch.getException())) { // If only one item left, the item size must beyond 64k, which // exceedes the limit. 
if (computeFailedBatchSize(failedBatch) == 1) { failedBatches.add(failedBatch); } else { divideBatch(batch, firstHalfBatch, secondHalfBatch); failedBatches.addAll(writeOneBatch(firstHalfBatch)); failedBatches.addAll(writeOneBatch(secondHalfBatch)); } } else { failedBatches.add(failedBatch); } } return failedBatches; } /** * Check whether there are throttling exception in the failed batches. */ private boolean containsThrottlingException (List failedBatches) { for (FailedBatch failedBatch : failedBatches) { Exception e = failedBatch.getException(); if (e instanceof AmazonServiceException && RetryUtils.isThrottlingException((AmazonServiceException) e)) { return true; } } return false; } /** * Divide the batch of objects to save into two smaller batches. Each contains half of the elements. */ private void divideBatch(Map> batch, Map> firstHalfBatch, Map> secondHalfBatch) { for (String key : batch.keySet()) { List requests = batch.get(key); List firstHalfRequests = requests.subList(0, requests.size() / 2); List secondHalfRequests = requests.subList(requests.size() / 2, requests.size()); firstHalfBatch.put(key, firstHalfRequests); secondHalfBatch.put(key, secondHalfRequests); } } /** * Count the total number of unprocessed items in the failed batch. */ private int computeFailedBatchSize(FailedBatch failedBatch) { int count = 0; for (String tableName : failedBatch.getUnprocessedItems().keySet()) { count += failedBatch.getUnprocessedItems().get(tableName).size(); } return count; } /** * Continue trying to process the batch until it finishes or an exception * occurs. 
*/ private FailedBatch callUntilCompletion(Map> batch) { BatchWriteItemResult result = null; int retries = 0; FailedBatch failedBatch = null; while (true) { try { result = db.batchWriteItem(new BatchWriteItemRequest().withRequestItems(batch)); } catch (Exception e) { failedBatch = new FailedBatch(); failedBatch.setUnprocessedItems(batch); failedBatch.setException(e); return failedBatch; } retries++; batch = result.getUnprocessedItems(); if (batch.size() > 0) { pauseExponentially(retries); } else { break; } } return failedBatch; } /** * Retrieves multiple items from multiple tables using their primary keys. * * @see DynamoDBMapper#batchLoad(List, DynamoDBMapperConfig) */ public Map> batchLoad(List itemsToGet) { return batchLoad(itemsToGet, this.config); } /** * Retrieves multiple items from multiple tables using their primary keys. * * @param itemsToGet * Key objects, corresponding to the class to fetch, with their * primary key values set. * @param config * Only {@link DynamoDBMapperConfig#getTableNameOverride()} and * {@link DynamoDBMapperConfig#getConsistentReads()} are * considered. 
*/ public Map> batchLoad(List itemsToGet, DynamoDBMapperConfig config) { config = mergeConfig(config); boolean consistentReads = (config.getConsistentReads() == ConsistentReads.CONSISTENT); if ( itemsToGet == null || itemsToGet.isEmpty() ) { return new HashMap>(); } Map requestItems = new HashMap(); Map> classesByTableName = new HashMap>(); Map> resultSet = new HashMap>(); int count = 0; for ( Object keyObject : itemsToGet ) { Class clazz = keyObject.getClass(); String tableName = getTableName(clazz, config); classesByTableName.put(tableName, clazz); if ( !requestItems.containsKey(tableName) ) { requestItems.put( tableName, new KeysAndAttributes().withConsistentRead(consistentReads).withKeys( new LinkedList>())); } requestItems.get(tableName).getKeys().add(getKey(keyObject)); // Reach the maximum number which can be handled in a single batchGet if ( ++count == 100 ) { processBatchGetRequest(classesByTableName, requestItems, resultSet, config); requestItems.clear(); count = 0; } } if ( count > 0 ) { processBatchGetRequest(classesByTableName, requestItems, resultSet, config); } return resultSet; } /** * Retrieves the attributes for multiple items from multiple tables using * their primary keys. * {@link AmazonDynamoDB#batchGetItem(BatchGetItemRequest)} API. * * @see #batchLoad(List, DynamoDBMapperConfig) * @see #batchLoad(Map, DynamoDBMapperConfig) */ public Map> batchLoad(Map, List> itemsToGet) { return batchLoad(itemsToGet, this.config); } /** * Retrieves multiple items from multiple tables using their primary keys. * Valid only for tables with a single hash key, or a single hash and range * key. For other schemas, use * {@link DynamoDBMapper#batchLoad(List, DynamoDBMapperConfig)} * * @param itemsToGet * Map from class to load to list of primary key attributes. * @param config * Only {@link DynamoDBMapperConfig#getTableNameOverride()} and * {@link DynamoDBMapperConfig#getConsistentReads()} are * considered. 
*/ public Map> batchLoad(Map, List> itemsToGet, DynamoDBMapperConfig config) { List keys = new ArrayList(); if ( itemsToGet != null ) { for ( Class clazz : itemsToGet.keySet() ) { if ( itemsToGet.get(clazz) != null ) { for ( KeyPair keyPair : itemsToGet.get(clazz) ) { keys.add(createKeyObject(clazz, keyPair.getHashKey(), keyPair.getRangeKey())); } } } } return batchLoad(keys, config); } /** * @param config never null */ private void processBatchGetRequest( final Map> classesByTableName, final Map requestItems, final Map> resultSet, final DynamoDBMapperConfig config) { BatchGetItemResult batchGetItemResult = null; BatchGetItemRequest batchGetItemRequest = new BatchGetItemRequest() .withRequestMetricCollector(config.getRequestMetricCollector()); batchGetItemRequest.setRequestItems(requestItems); int retries = 0; int noOfItemsInOriginalRequest = requestItems.size(); do { if ( batchGetItemResult != null ) { retries++; if (noOfItemsInOriginalRequest == batchGetItemResult .getUnprocessedKeys().size()){ pauseExponentially(retries); if (retries > BATCH_GET_MAX_RETRY_COUNT_ALL_KEYS) { throw new AmazonClientException( "Batch Get Item request to server hasn't received any data. Please try again later."); } } batchGetItemRequest.setRequestItems(batchGetItemResult.getUnprocessedKeys()); } batchGetItemResult = db.batchGetItem(batchGetItemRequest); Map>> responses = batchGetItemResult.getResponses(); for ( String tableName : responses.keySet() ) { List objects = null; if ( resultSet.get(tableName) != null ) { objects = resultSet.get(tableName); } else { objects = new LinkedList(); } Class clazz = classesByTableName.get(tableName); for ( Map item : responses.get(tableName) ) { AttributeTransformer.Parameters parameters = toParameters(item, clazz, config); objects.add(marshalIntoObject(parameters)); } resultSet.put(tableName, objects); } // To see whether there are unprocessed keys. 
} while ( batchGetItemResult.getUnprocessedKeys() != null && batchGetItemResult.getUnprocessedKeys().size() > 0 ); } /** * Swallows the checked exceptions around Method.invoke and repackages them * as {@link DynamoDBMappingException} */ private Object safeInvoke(Method method, Object object, Object... arguments) { try { return method.invoke(object, arguments); } catch ( IllegalAccessException e ) { throw new DynamoDBMappingException("Couldn't invoke " + method, e); } catch ( IllegalArgumentException e ) { throw new DynamoDBMappingException("Couldn't invoke " + method, e); } catch ( InvocationTargetException e ) { throw new DynamoDBMappingException("Couldn't invoke " + method, e); } } private final class ValueUpdate { private Method method; private AttributeValue newValue; private Object target; public ValueUpdate(Method method, AttributeValue newValue, Object target) { this.method = method; this.newValue = newValue; this.target = target; } public void apply() { setValue(target, method, newValue); } } /** * Converts the {@link AttributeValueUpdate} map given to an equivalent * {@link AttributeValue} map. */ private Map convertToItem(Map putValues) { Map map = new HashMap(); for ( Entry entry : putValues.entrySet() ) { /* * AttributeValueUpdate allows nulls for its values, since they are * semantically meaningful. AttributeValues never have null values. */ if ( entry.getValue().getValue() != null ) map.put(entry.getKey(), entry.getValue().getValue()); } return map; } /** * Gets the attribute value object corresponding to the * {@link DynamoDBVersionAttribute} getter, and its result, given. Null * values are assumed to be new objects and given the smallest possible * positive value. Non-null values are incremented from their current value. 
*/ private AttributeValue getVersionAttributeValue(final Method getter, Object getterReturnResult) { ArgumentMarshaller marshaller = reflector.getVersionedArgumentMarshaller(getter, getterReturnResult); return marshaller.marshall(getterReturnResult); } /** * Returns an attribute value corresponding to the key method and value given. */ private AttributeValue getAutoGeneratedKeyAttributeValue(Method getter, Object getterResult) { ArgumentMarshaller marshaller = reflector.getAutoGeneratedKeyArgumentMarshaller(getter); return marshaller.marshall(getterResult); } /** * Scans through an Amazon DynamoDB table and returns the matching results as * an unmodifiable list of instantiated objects, using the default configuration. * * @see DynamoDBMapper#scan(Class, DynamoDBScanExpression, DynamoDBMapperConfig) */ public PaginatedScanList scan(Class clazz, DynamoDBScanExpression scanExpression) { return scan(clazz, scanExpression, config); } /** * Scans through an Amazon DynamoDB table and returns the matching results as * an unmodifiable list of instantiated objects. The table to scan is * determined by looking at the annotations on the specified class, which * declares where to store the object data in Amazon DynamoDB, and the scan * expression parameter allows the caller to filter results and control how * the scan is executed. *

* Callers should be aware that the returned list is unmodifiable, and any * attempts to modify the list will result in an * UnsupportedOperationException. *

* You can specify the pagination loading strategy for this scan operation. * By default, the list returned is lazily loaded when possible. * * @param * The type of the objects being returned. * @param clazz * The class annotated with DynamoDB annotations describing how * to store the object data in Amazon DynamoDB. * @param scanExpression * Details on how to run the scan, including any filters to apply * to limit results. * @param config * The configuration to use for this scan, which overrides the * default provided at object construction. * @return An unmodifiable list of the objects constructed from the results * of the scan operation. * @see PaginatedScanList * @see PaginationLoadingStrategy */ public PaginatedScanList scan(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); ScanResult scanResult = db.scan(applyUserAgent(scanRequest)); return new PaginatedScanList(this, clazz, db, scanRequest, scanResult, config.getPaginationLoadingStrategy(), config); } /** * Scans through an Amazon DynamoDB table on logically partitioned segments * in parallel and returns the matching results in one unmodifiable list of * instantiated objects, using the default configuration. * * @see DynamoDBMapper#parallelScan(Class, DynamoDBScanExpression,int, * DynamoDBMapperConfig) */ public PaginatedParallelScanList parallelScan(Class clazz, DynamoDBScanExpression scanExpression, int totalSegments) { return parallelScan(clazz, scanExpression, totalSegments, config); } /** * Scans through an Amazon DynamoDB table on logically partitioned segments * in parallel. This method will create a thread pool of the specified size, * and each thread will issue scan requests for its assigned segment, * following the returned continuation token, until the end of its segment. 
* Callers should be responsible for setting the appropriate number of total * segments. More scan segments would result in better performance but more * consumed capacity of the table. The results are returned in one * unmodifiable list of instantiated objects. The table to scan is * determined by looking at the annotations on the specified class, which * declares where to store the object data in Amazon DynamoDB, and the scan * expression parameter allows the caller to filter results and control how * the scan is executed. *

* Callers should be aware that the returned list is unmodifiable, and any * attempts to modify the list will result in an * UnsupportedOperationException. *

* You can specify the pagination loading strategy for this parallel scan operation. * By default, the list returned is lazily loaded when possible. * * @param * The type of the objects being returned. * @param clazz * The class annotated with DynamoDB annotations describing how * to store the object data in Amazon DynamoDB. * @param scanExpression * Details on how to run the scan, including any filters to apply * to limit results. * @param totalSegments * Number of total parallel scan segments. * Range: 1 - 4096 * @param config * The configuration to use for this scan, which overrides the * default provided at object construction. * @return An unmodifiable list of the objects constructed from the results * of the scan operation. * @see PaginatedParallelScanList * @see PaginationLoadingStrategy */ public PaginatedParallelScanList parallelScan(Class clazz, DynamoDBScanExpression scanExpression, int totalSegments, DynamoDBMapperConfig config) { config = mergeConfig(config); // Create hard copies of the original scan request with difference segment number. List parallelScanRequests = createParallelScanRequestsFromExpression(clazz, scanExpression, totalSegments, config); ParallelScanTask parallelScanTask = new ParallelScanTask(this, db, parallelScanRequests); return new PaginatedParallelScanList(this, clazz, db, parallelScanTask, config.getPaginationLoadingStrategy(), config); } /** * Scans through an Amazon DynamoDB table and returns a single page of matching * results. The table to scan is determined by looking at the annotations on * the specified class, which declares where to store the object data in AWS * DynamoDB, and the scan expression parameter allows the caller to filter * results and control how the scan is executed. * * @param * The type of the objects being returned. * @param clazz * The class annotated with DynamoDB annotations describing how * to store the object data in Amazon DynamoDB. 
* @param scanExpression * Details on how to run the scan, including any filters to apply * to limit results. * @param config * The configuration to use for this scan, which overrides the * default provided at object construction. */ public ScanResultPage scanPage(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); ScanResult scanResult = db.scan(applyUserAgent(scanRequest)); ScanResultPage result = new ScanResultPage(); List> parameters = toParameters(scanResult.getItems(), clazz, config); result.setResults(marshalIntoObjects(parameters)); result.setLastEvaluatedKey(scanResult.getLastEvaluatedKey()); return result; } /** * Scans through an Amazon DynamoDB table and returns a single page of matching * results. * * @see DynamoDBMapper#scanPage(Class, DynamoDBScanExpression, DynamoDBMapperConfig) */ public ScanResultPage scanPage(Class clazz, DynamoDBScanExpression scanExpression) { return scanPage(clazz, scanExpression, this.config); } /** * Queries an Amazon DynamoDB table and returns the matching results as an * unmodifiable list of instantiated objects, using the default * configuration. * * @see DynamoDBMapper#query(Class, DynamoDBQueryExpression, * DynamoDBMapperConfig) */ public PaginatedQueryList query(Class clazz, DynamoDBQueryExpression queryExpression) { return query(clazz, queryExpression, config); } /** * Queries an Amazon DynamoDB table and returns the matching results as an * unmodifiable list of instantiated objects. The table to query is * determined by looking at the annotations on the specified class, which * declares where to store the object data in Amazon DynamoDB, and the query * expression parameter allows the caller to filter results and control how * the query is executed. *

* When the query is on any local/global secondary index, callers should be aware that * the returned object(s) will only contain item attributes that are projected * into the index. All the other unprojected attributes will be saved as type * default values. *

* Callers should also be aware that the returned list is unmodifiable, and any * attempts to modify the list will result in an * UnsupportedOperationException. *

* You can specify the pagination loading strategy for this query operation. * By default, the list returned is lazily loaded when possible. * * @param * The type of the objects being returned. * @param clazz * The class annotated with DynamoDB annotations describing how * to store the object data in Amazon DynamoDB. * @param queryExpression * Details on how to run the query, including any conditions on * the key values * @param config * The configuration to use for this query, which overrides the * default provided at object construction. * @return An unmodifiable list of the objects constructed from the results * of the query operation. * @see PaginatedQueryList * @see PaginationLoadingStrategy */ public PaginatedQueryList query(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); QueryResult queryResult = db.query(applyUserAgent(queryRequest)); return new PaginatedQueryList(this, clazz, db, queryRequest, queryResult, config.getPaginationLoadingStrategy(), config); } /** * Queries an Amazon DynamoDB table and returns a single page of matching * results. The table to query is determined by looking at the annotations * on the specified class, which declares where to store the object data in * Amazon DynamoDB, and the query expression parameter allows the caller to * filter results and control how the query is executed. * * @see DynamoDBMapper#queryPage(Class, DynamoDBQueryExpression, DynamoDBMapperConfig) */ public QueryResultPage queryPage(Class clazz, DynamoDBQueryExpression queryExpression) { return queryPage(clazz, queryExpression, this.config); } /** * Queries an Amazon DynamoDB table and returns a single page of matching * results. 
The table to query is determined by looking at the annotations * on the specified class, which declares where to store the object data in * Amazon DynamoDB, and the query expression parameter allows the caller to * filter results and control how the query is executed. * * @param * The type of the objects being returned. * @param clazz * The class annotated with DynamoDB annotations describing how * to store the object data in AWS DynamoDB. * @param queryExpression * Details on how to run the query, including any conditions on * the key values * @param config * The configuration to use for this query, which overrides the * default provided at object construction. */ public QueryResultPage queryPage(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); QueryResult scanResult = db.query(applyUserAgent(queryRequest)); QueryResultPage result = new QueryResultPage(); List> parameters = toParameters(scanResult.getItems(), clazz, config); result.setResults(marshalIntoObjects(parameters)); result.setLastEvaluatedKey(scanResult.getLastEvaluatedKey()); return result; } /** * Evaluates the specified scan expression and returns the count of matching * items, without returning any of the actual item data, using the default configuration. * * @see DynamoDBMapper#count(Class, DynamoDBScanExpression, DynamoDBMapperConfig) */ public int count(Class clazz, DynamoDBScanExpression scanExpression) { return count(clazz, scanExpression, config); } /** * Evaluates the specified scan expression and returns the count of matching * items, without returning any of the actual item data. *

* This operation will scan your entire table, and can therefore be very * expensive. Use with caution. * * @param clazz * The class mapped to a DynamoDB table. * @param scanExpression * The parameters for running the scan. * @param config * The configuration to use for this scan, which overrides the * default provided at object construction. * @return The count of matching items, without returning any of the actual * item data. */ public int count(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); scanRequest.setSelect(Select.COUNT); // Count scans can also be truncated for large datasets int count = 0; ScanResult scanResult = null; do { scanResult = db.scan(applyUserAgent(scanRequest)); count += scanResult.getCount(); scanRequest.setExclusiveStartKey(scanResult.getLastEvaluatedKey()); } while (scanResult.getLastEvaluatedKey() != null); return count; } /** * Evaluates the specified query expression and returns the count of matching * items, without returning any of the actual item data, using the default configuration. * * @see DynamoDBMapper#count(Class, DynamoDBQueryExpression, DynamoDBMapperConfig) */ public int count(Class clazz, DynamoDBQueryExpression queryExpression) { return count(clazz, queryExpression, config); } /** * Evaluates the specified query expression and returns the count of * matching items, without returning any of the actual item data. * * @param clazz * The class mapped to a DynamoDB table. * @param queryExpression * The parameters for running the scan. * @param config * The mapper configuration to use for the query, which overrides * the default provided at object construction. * @return The count of matching items, without returning any of the actual * item data. 
*/ public int count(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { config = mergeConfig(config); QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); queryRequest.setSelect(Select.COUNT); // Count queries can also be truncated for large datasets int count = 0; QueryResult queryResult = null; do { queryResult = db.query(applyUserAgent(queryRequest)); count += queryResult.getCount(); queryRequest.setExclusiveStartKey(queryResult.getLastEvaluatedKey()); } while (queryResult.getLastEvaluatedKey() != null); return count; } /** * Merges the config object given with the one specified at construction and * returns the result. */ private DynamoDBMapperConfig mergeConfig(DynamoDBMapperConfig config) { if ( config != this.config ) config = new DynamoDBMapperConfig(this.config, config); return config; } /** * @param config never null */ private ScanRequest createScanRequestFromExpression(Class clazz, DynamoDBScanExpression scanExpression, DynamoDBMapperConfig config) { ScanRequest scanRequest = new ScanRequest(); scanRequest.setTableName(getTableName(clazz, config)); scanRequest.setScanFilter(scanExpression.getScanFilter()); scanRequest.setLimit(scanExpression.getLimit()); scanRequest.setExclusiveStartKey(scanExpression.getExclusiveStartKey()); scanRequest.setTotalSegments(scanExpression.getTotalSegments()); scanRequest.setSegment(scanExpression.getSegment()); scanRequest.setRequestMetricCollector(config.getRequestMetricCollector()); return scanRequest; } /** * @param config never null */ private List createParallelScanRequestsFromExpression(Class clazz, DynamoDBScanExpression scanExpression, int totalSegments, DynamoDBMapperConfig config) { if (totalSegments < 1) { throw new IllegalArgumentException("Parallel scan should have at least one scan segment."); } if (scanExpression.getExclusiveStartKey() != null) { log.info("The ExclusiveStartKey parameter specified in the DynamoDBScanExpression is 
ignored," + " since the individual parallel scan request on each segment is applied on a separate key scope."); } if (scanExpression.getSegment() != null || scanExpression.getTotalSegments() != null) { log.info("The Segment and TotalSegments parameters specified in the DynamoDBScanExpression are ignored."); } List parallelScanRequests= new LinkedList(); for (int segment = 0; segment < totalSegments; segment++) { ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); parallelScanRequests.add(scanRequest .withSegment(segment).withTotalSegments(totalSegments) .withExclusiveStartKey(null)); } return parallelScanRequests; } private QueryRequest createQueryRequestFromExpression(Class clazz, DynamoDBQueryExpression queryExpression, DynamoDBMapperConfig config) { QueryRequest queryRequest = new QueryRequest(); queryRequest.setConsistentRead(queryExpression.isConsistentRead()); queryRequest.setTableName(getTableName(clazz, config)); queryRequest.setIndexName(queryExpression.getIndexName()); // Hash key (primary or index) conditions Map hashKeyConditions = getHashKeyEqualsConditions(queryExpression.getHashKeyValues()); // Range key (primary or index) conditions Map rangeKeyConditions = queryExpression.getRangeKeyConditions(); processKeyConditions(clazz, queryRequest, hashKeyConditions, rangeKeyConditions); queryRequest.setScanIndexForward(queryExpression.isScanIndexForward()); queryRequest.setLimit(queryExpression.getLimit()); queryRequest.setExclusiveStartKey(queryExpression.getExclusiveStartKey()); queryRequest.setRequestMetricCollector(config.getRequestMetricCollector()); return queryRequest; } /** * Utility method for checking the validity of both hash and range key * conditions. It also tries to infer the correct index name from the POJO * annotation, if such information is not directly specified by the user. * * @param clazz * The domain class of the queried items. * @param queryRequest * The QueryRequest object to be sent to service. 
* @param hashKeyConditions * All the hash key EQ conditions extracted from the POJO object. * The mapper will choose one of them that could be applied together with * the user-specified (if any) index name and range key conditions. Or it * throws error if more than one conditions are applicable for the query. * @param rangeKeyConditions * The range conditions specified by the user. We currently only * allow at most one range key condition. */ private void processKeyConditions(Class clazz, QueryRequest queryRequest, Map hashKeyConditions, Map rangeKeyConditions) { // There should be least one hash key condition. if (hashKeyConditions == null || hashKeyConditions.isEmpty()) { throw new IllegalArgumentException("Illegal query expression: No hash key condition is found in the query"); } // We don't allow multiple range key conditions. if (rangeKeyConditions != null && rangeKeyConditions.size() > 1) { throw new IllegalArgumentException( "Illegal query expression: Conditions on multiple range keys (" + rangeKeyConditions.keySet().toString() + ") are found in the query. DynamoDB service only accepts up to ONE range key condition."); } final boolean hasRangeKeyCondition = (rangeKeyConditions != null) && (!rangeKeyConditions.isEmpty()); final String userProvidedIndexName = queryRequest.getIndexName(); final String primaryHashKeyName = reflector.getPrimaryHashKeyName(clazz); final TableIndexesInfo parsedIndexesInfo = schemaParser.parseTableIndexes(clazz, reflector); // First collect the names of all the global/local secondary indexes that could be applied to this query. 
// If the user explicitly specified an index name, we also need to // 1) check the index is applicable for both hash and range key conditions // 2) choose one hash key condition if there are more than one of them boolean hasPrimaryHashKeyCondition = false; final Map> annotatedGSIsOnHashKeys = new HashMap>(); String hashKeyNameForThisQuery = null; boolean hasPrimaryRangeKeyCondition = false; final Set annotatedLSIsOnRangeKey = new HashSet(); final Set annotatedGSIsOnRangeKey = new HashSet(); // Range key condition String rangeKeyNameForThisQuery = null; if (hasRangeKeyCondition) { for (String rangeKeyName : rangeKeyConditions.keySet()) { rangeKeyNameForThisQuery = rangeKeyName; if (reflector.hasPrimaryRangeKey(clazz) && rangeKeyName.equals(reflector.getPrimaryRangeKeyName(clazz))) { hasPrimaryRangeKeyCondition = true; } Collection annotatedLSI = parsedIndexesInfo.getLsiNamesByIndexRangeKey(rangeKeyName); if (annotatedLSI != null) { annotatedLSIsOnRangeKey.addAll(annotatedLSI); } Collection annotatedGSI = parsedIndexesInfo.getGsiNamesByIndexRangeKey(rangeKeyName); if (annotatedGSI != null) { annotatedGSIsOnRangeKey.addAll(annotatedGSI); } } if ( !hasPrimaryRangeKeyCondition && annotatedLSIsOnRangeKey.isEmpty() && annotatedGSIsOnRangeKey.isEmpty()) { throw new DynamoDBMappingException( "The query contains a condition on a range key (" + rangeKeyNameForThisQuery + ") " + "that is not annotated with either @DynamoDBRangeKey or @DynamoDBIndexRangeKey."); } } final boolean userProvidedLSIWithRangeKeyCondition = (userProvidedIndexName != null) && (annotatedLSIsOnRangeKey.contains(userProvidedIndexName)); final boolean hashOnlyLSIQuery = (userProvidedIndexName != null) && ( !hasRangeKeyCondition ) && parsedIndexesInfo.getAllLsiNames().contains(userProvidedIndexName); final boolean userProvidedLSI = userProvidedLSIWithRangeKeyCondition || hashOnlyLSIQuery; final boolean userProvidedGSIWithRangeKeyCondition = (userProvidedIndexName != null) && 
(annotatedGSIsOnRangeKey.contains(userProvidedIndexName)); final boolean hashOnlyGSIQuery = (userProvidedIndexName != null) && ( !hasRangeKeyCondition ) && parsedIndexesInfo.getAllGsiNames().contains(userProvidedIndexName); final boolean userProvidedGSI = userProvidedGSIWithRangeKeyCondition || hashOnlyGSIQuery; if (userProvidedLSI && userProvidedGSI ) { throw new DynamoDBMappingException( "Invalid query: " + "Index \"" + userProvidedIndexName + "\" " + "is annotateded as both a LSI and a GSI for attribute."); } // Hash key conditions for (String hashKeyName : hashKeyConditions.keySet()) { if (hashKeyName.equals(primaryHashKeyName)) { hasPrimaryHashKeyCondition = true; } Collection annotatedGSINames = parsedIndexesInfo.getGsiNamesByIndexHashKey(hashKeyName); annotatedGSIsOnHashKeys.put(hashKeyName, annotatedGSINames == null ? new HashSet() : new HashSet(annotatedGSINames)); // Additional validation if the user provided an index name. if (userProvidedIndexName != null) { boolean foundHashKeyConditionValidWithUserProvidedIndex = false; if (userProvidedLSI && hashKeyName.equals(primaryHashKeyName)) { // found an applicable hash key condition (primary hash + LSI range) foundHashKeyConditionValidWithUserProvidedIndex = true; } else if (userProvidedGSI && annotatedGSINames != null && annotatedGSINames.contains(userProvidedIndexName)) { // found an applicable hash key condition (GSI hash + range) foundHashKeyConditionValidWithUserProvidedIndex = true; } if (foundHashKeyConditionValidWithUserProvidedIndex) { if ( hashKeyNameForThisQuery != null ) { throw new IllegalArgumentException( "Ambiguous query expression: More than one hash key EQ conditions (" + hashKeyNameForThisQuery + ", " + hashKeyName + ") are applicable to the specified index (" + userProvidedIndexName + "). 
" + "Please provide only one of them in the query expression."); } else { // found an applicable hash key condition hashKeyNameForThisQuery = hashKeyName; } } } } // Collate all the key conditions Map keyConditions = new HashMap(); // With user-provided index name if (userProvidedIndexName != null) { if (hasRangeKeyCondition && ( !userProvidedLSI ) && ( !userProvidedGSI )) { throw new IllegalArgumentException( "Illegal query expression: No range key condition is applicable to the specified index (" + userProvidedIndexName + "). "); } if (hashKeyNameForThisQuery == null) { throw new IllegalArgumentException( "Illegal query expression: No hash key condition is applicable to the specified index (" + userProvidedIndexName + "). "); } keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery)); if (hasRangeKeyCondition) { keyConditions.putAll(rangeKeyConditions); } } // Infer the index name by finding the index shared by both hash and range key annotations. else { if (hasRangeKeyCondition) { String inferredIndexName = null; hashKeyNameForThisQuery = null; if (hasPrimaryHashKeyCondition && hasPrimaryRangeKeyCondition) { // Found valid query: primary hash + range key conditions hashKeyNameForThisQuery = primaryHashKeyName; } else { // Intersect the set of all the indexes applicable to the range key // with the set of indexes applicable to each hash key condition. 
for (String hashKeyName : annotatedGSIsOnHashKeys.keySet()) { boolean foundValidQueryExpressionWithInferredIndex = false; String indexNameInferredByThisHashKey = null; if (hashKeyName.equals(primaryHashKeyName)) { if (annotatedLSIsOnRangeKey.size() == 1) { // Found valid query (Primary hash + LSI range conditions) foundValidQueryExpressionWithInferredIndex = true; indexNameInferredByThisHashKey = annotatedLSIsOnRangeKey.iterator().next(); } } Set annotatedGSIsOnHashKey = annotatedGSIsOnHashKeys.get(hashKeyName); // We don't need the data in annotatedGSIsOnHashKeys afterwards, // so it's safe to do the intersection in-place. annotatedGSIsOnHashKey.retainAll(annotatedGSIsOnRangeKey); if (annotatedGSIsOnHashKey.size() == 1) { // Found valid query (Hash + range conditions on a GSI) if (foundValidQueryExpressionWithInferredIndex) { hashKeyNameForThisQuery = hashKeyName; inferredIndexName = indexNameInferredByThisHashKey; } foundValidQueryExpressionWithInferredIndex = true; indexNameInferredByThisHashKey = annotatedGSIsOnHashKey.iterator().next(); } if (foundValidQueryExpressionWithInferredIndex) { if (hashKeyNameForThisQuery != null) { throw new IllegalArgumentException( "Ambiguous query expression: Found multiple valid queries: " + "(Hash: \"" + hashKeyNameForThisQuery + "\", Range: \"" + rangeKeyNameForThisQuery + "\", Index: \"" + inferredIndexName + "\") and " + "(Hash: \"" + hashKeyName + "\", Range: \"" + rangeKeyNameForThisQuery + "\", Index: \"" + indexNameInferredByThisHashKey + "\")."); } else { hashKeyNameForThisQuery = hashKeyName; inferredIndexName = indexNameInferredByThisHashKey; } } } } if (hashKeyNameForThisQuery != null) { keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery)); keyConditions.putAll(rangeKeyConditions); queryRequest.setIndexName(inferredIndexName); } else { throw new IllegalArgumentException( "Illegal query expression: Cannot infer the index name from the query expression."); } } else { // No range 
key condition is specified. if (hashKeyConditions.size() > 1) { if ( hasPrimaryHashKeyCondition ) { keyConditions.put(primaryHashKeyName, hashKeyConditions.get(primaryHashKeyName)); } else { throw new IllegalArgumentException( "Ambiguous query expression: More than one index hash key EQ conditions (" + hashKeyConditions.keySet() + ") are applicable to the query. " + "Please provide only one of them in the query expression, or specify the appropriate index name."); } } else { // Only one hash key condition String hashKeyName = annotatedGSIsOnHashKeys.keySet().iterator().next(); if ( !hasPrimaryHashKeyCondition ) { if (annotatedGSIsOnHashKeys.get(hashKeyName).size() == 1) { // Set the index if the index hash key is only annotated with one GSI. queryRequest.setIndexName(annotatedGSIsOnHashKeys.get(hashKeyName).iterator().next()); } else if (annotatedGSIsOnHashKeys.get(hashKeyName).size() > 1) { throw new IllegalArgumentException( "Ambiguous query expression: More than one GSIs (" + annotatedGSIsOnHashKeys.get(hashKeyName) + ") are applicable to the query. 
" + "Please specify one of them in your query expression."); } else { throw new IllegalArgumentException( "Illegal query expression: No GSI is found in the @DynamoDBIndexHashKey annotation for attribute " + "\"" + hashKeyName + "\"."); } } keyConditions.putAll(hashKeyConditions); } } } queryRequest.setKeyConditions(keyConditions); } private AttributeTransformer.Parameters toParameters( final Map attributeValues, final Class modelClass, final DynamoDBMapperConfig mapperConfig) { return toParameters(attributeValues, false, modelClass, mapperConfig); } private AttributeTransformer.Parameters toParameters( final Map attributeValues, final boolean partialUpdate, final Class modelClass, final DynamoDBMapperConfig mapperConfig) { return new TransformerParameters(reflector, attributeValues, partialUpdate, modelClass, mapperConfig); } final List> toParameters( final List> attributeValues, final Class modelClass, final DynamoDBMapperConfig mapperConfig ) { List> rval = new ArrayList>( attributeValues.size()); for (Map item : attributeValues) { rval.add(toParameters(item, modelClass, mapperConfig)); } return rval; } /** * The one true implementation of AttributeTransformer.Parameters. 
*/ private static class TransformerParameters implements AttributeTransformer.Parameters { private final DynamoDBReflector reflector; private final Map attributeValues; private final boolean partialUpdate; private final Class modelClass; private final DynamoDBMapperConfig mapperConfig; private String tableName; private String hashKeyName; private String rangeKeyName; public TransformerParameters( final DynamoDBReflector reflector, final Map attributeValues, final boolean partialUpdate, final Class modelClass, final DynamoDBMapperConfig mapperConfig) { this.reflector = reflector; this.attributeValues = Collections.unmodifiableMap(attributeValues); this.partialUpdate = partialUpdate; this.modelClass = modelClass; this.mapperConfig = mapperConfig; } @Override public Map getAttributeValues() { return attributeValues; } @Override public boolean isPartialUpdate() { return partialUpdate; } @Override public Class getModelClass() { return modelClass; } @Override public DynamoDBMapperConfig getMapperConfig() { return mapperConfig; } @Override public String getTableName() { if (tableName == null) { tableName = DynamoDBMapper .getTableName(modelClass, mapperConfig, reflector); } return tableName; } @Override public String getHashKeyName() { if (hashKeyName == null) { Method hashKeyGetter = reflector.getPrimaryHashKeyGetter(modelClass); hashKeyName = reflector.getAttributeName(hashKeyGetter); } return hashKeyName; } @Override public String getRangeKeyName() { if (rangeKeyName == null) { Method rangeKeyGetter = reflector.getPrimaryRangeKeyGetter(modelClass); if (rangeKeyGetter == null) { rangeKeyName = NO_RANGE_KEY; } else { rangeKeyName = reflector.getAttributeName(rangeKeyGetter); } } if (rangeKeyName == NO_RANGE_KEY) { return null; } return rangeKeyName; } } private Map untransformAttributes( final AttributeTransformer.Parameters parameters ) { if (transformer != null) { return transformer.untransform(parameters); } return untransformAttributes( parameters.getModelClass(), 
parameters.getAttributeValues()); } /** * By default, just calls {@link #untransformAttributes(String, String, Map)}. * * @deprecated in favor of {@link AttributeTransformer} */ @Deprecated protected Map untransformAttributes(Class clazz, Map attributeValues) { Method hashKeyGetter = reflector.getPrimaryHashKeyGetter(clazz); String hashKeyName = reflector.getAttributeName(hashKeyGetter); Method rangeKeyGetter = reflector.getPrimaryRangeKeyGetter(clazz); String rangeKeyName = rangeKeyGetter == null ? null : reflector.getAttributeName(rangeKeyGetter); return untransformAttributes(hashKeyName, rangeKeyName, attributeValues); } /** * Transforms the attribute values after loading from DynamoDb. * Only ever called by {@link #untransformAttributes(Class, Map)}. * By default, returns the attributes unchanged. * * @param hashKey the attribute name of the hash key * @param rangeKey the attribute name of the range key (or null if there is none) * @param attributeValues * @return the decrypted attributes * @deprecated in favor of {@link AttributeTransformer} */ @Deprecated protected Map untransformAttributes(String hashKey, String rangeKey, Map attributeValues) { return attributeValues; } private Map transformAttributes( final AttributeTransformer.Parameters parameters) { if (transformer != null) { return transformer.transform(parameters); } return transformAttributes( parameters.getModelClass(), parameters.getAttributeValues()); } /** * By default, just calls {@link #transformAttributes(String, String, Map)}. * * @param clazz * @param attributeValues * @return the decrypted attribute values * @deprecated in favor of {@link AttributeTransformer} */ @Deprecated protected Map transformAttributes(Class clazz, Map attributeValues) { Method hashKeyGetter = reflector.getPrimaryHashKeyGetter(clazz); String hashKeyName = reflector.getAttributeName(hashKeyGetter); Method rangeKeyGetter = reflector.getPrimaryRangeKeyGetter(clazz); String rangeKeyName = rangeKeyGetter == null ? 
null : reflector.getAttributeName(rangeKeyGetter); return transformAttributes(hashKeyName, rangeKeyName, attributeValues); } /** * Transform attribute values prior to storing in DynamoDB. * Only ever called by {@link #transformAttributes(Class, Map)}. * By default, returns the attributes unchanged. * * @param hashKey the attribute name of the hash key * @param rangeKey the attribute name of the range key (or null if there is none) * @param attributeValues * @return the encrypted attributes * @deprecated in favor of {@link AttributeTransformer} */ @Deprecated protected Map transformAttributes(String hashKey, String rangeKey, Map attributeValues) { return attributeValues; } private Map transformAttributeUpdates( final Class clazz, final Map keys, final Map updateValues, final DynamoDBMapperConfig config ) { Map item = convertToItem(updateValues); HashSet keysAdded = new HashSet(); for (Map.Entry e : keys.entrySet()) { if (!item.containsKey(e.getKey())) { keysAdded.add(e.getKey()); item.put(e.getKey(), e.getValue()); } } AttributeTransformer.Parameters parameters = toParameters(item, true, clazz, config); String hashKey = parameters.getHashKeyName(); if (!item.containsKey(hashKey)) { item.put(hashKey, keys.get(hashKey)); } item = transformAttributes(parameters); for(Map.Entry entry: item.entrySet()) { if (keysAdded.contains(entry.getKey())) { // This was added in for context before calling // transformAttributes, but isn't actually being changed. 
continue; } AttributeValueUpdate update = updateValues.get(entry.getKey()); if (update != null) { update.getValue() .withB( entry.getValue().getB() ) .withBS(entry.getValue().getBS()) .withN( entry.getValue().getN() ) .withNS(entry.getValue().getNS()) .withS( entry.getValue().getS() ) .withSS(entry.getValue().getSS()); } else { updateValues.put(entry.getKey(), new AttributeValueUpdate(entry.getValue(), "PUT")); } } return updateValues; } private void pauseExponentially(int retries) { if (retries == 0) { return; } Random random = new Random(); long delay = 0; long scaleFactor = 500 + random.nextInt(100); delay = (long) (Math.pow(2, retries) * scaleFactor); delay = Math.min(delay, MAX_BACKOFF_IN_MILLISECONDS); try { Thread.sleep(delay); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new AmazonClientException(e.getMessage(), e); } } static X applyUserAgent(X request) { request.getRequestClientOptions().appendUserAgent(USER_AGENT); return request; } /** * The return type of batchWrite, batchDelete and batchSave. It contains the information about the unprocessed items * and the exception causing the failure. * */ public static class FailedBatch { private Map> unprocessedItems; private Exception exception; public void setUnprocessedItems(Map> unprocessedItems) { this.unprocessedItems = unprocessedItems; } public Map> getUnprocessedItems() { return unprocessedItems; } public void setException(Exception excetpion) { this.exception = excetpion; } public Exception getException() { return exception; } } /** * Returns the underlying {@link S3ClientCache} for accessing S3. */ public S3ClientCache getS3ClientCache() { return s3cc; } /** * Creates an S3Link with the specified bucket name and key using the * default S3 region. * This method requires the mapper to have been initialized with the * necessary credentials for accessing S3. * * @throws IllegalStateException if the mapper has not been constructed * with the necessary S3 AWS credentials. 
*/ public S3Link createS3Link(String bucketName, String key) { return createS3Link(null, bucketName , key); } /** * Creates an S3Link with the specified region, bucket name and key. * This method requires the mapper to have been initialized with the * necessary credentials for accessing S3. * * @throws IllegalStateException if the mapper has not been constructed * with the necessary S3 AWS credentials. */ public S3Link createS3Link(Region s3region, String bucketName, String key) { if ( s3cc == null ) { throw new IllegalStateException("Mapper must be constructed with S3 AWS Credentials to create S3Link"); } return new S3Link(s3cc, s3region, bucketName , key); } /** * Parse the given POJO class and return the CreateTableRequest for the * DynamoDB table it represents. Note that the returned request does not * include the required ProvisionedThroughput parameters for the primary * table and the GSIs, and that all secondary indexes are initialized with * the default projection type - KEY_ONLY. */ public CreateTableRequest generateCreateTableRequest(Class clazz) { return schemaParser.parseTablePojoToCreateTableRequest(clazz, config, reflector); } }