/*
* Hibernate OGM, Domain model persistence for NoSQL datastores
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.ogm.datastore.mongodb;
import static java.lang.Boolean.FALSE;
import static org.hibernate.ogm.datastore.document.impl.DotPatternMapHelpers.getColumnSharedPrefixOfAssociatedEntityLink;
import static org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoHelpers.hasField;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.bson.types.ObjectId;
import org.hibernate.AssertionFailure;
import org.hibernate.ogm.datastore.document.association.impl.DocumentHelpers;
import org.hibernate.ogm.datastore.document.cfg.DocumentStoreProperties;
import org.hibernate.ogm.datastore.document.impl.DotPatternMapHelpers;
import org.hibernate.ogm.datastore.document.impl.EmbeddableStateFinder;
import org.hibernate.ogm.datastore.document.options.AssociationStorageType;
import org.hibernate.ogm.datastore.document.options.MapStorageType;
import org.hibernate.ogm.datastore.document.options.spi.AssociationStorageOption;
import org.hibernate.ogm.datastore.map.impl.MapTupleSnapshot;
import org.hibernate.ogm.datastore.mongodb.configuration.impl.MongoDBConfiguration;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.AssociationStorageStrategy;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoDBAssociationSnapshot;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoDBTupleSnapshot;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoHelpers;
import org.hibernate.ogm.datastore.mongodb.impl.MongoDBDatastoreProvider;
import org.hibernate.ogm.datastore.mongodb.logging.impl.Log;
import org.hibernate.ogm.datastore.mongodb.logging.impl.LoggerFactory;
import org.hibernate.ogm.datastore.mongodb.options.AssociationDocumentStorageType;
import org.hibernate.ogm.datastore.mongodb.options.impl.AssociationDocumentStorageOption;
import org.hibernate.ogm.datastore.mongodb.options.impl.ReadPreferenceOption;
import org.hibernate.ogm.datastore.mongodb.options.impl.WriteConcernOption;
import org.hibernate.ogm.datastore.mongodb.query.impl.MongoDBQueryDescriptor;
import org.hibernate.ogm.datastore.mongodb.query.parsing.nativequery.impl.MongoDBQueryDescriptorBuilder;
import org.hibernate.ogm.datastore.mongodb.query.parsing.nativequery.impl.NativeQueryParser;
import org.hibernate.ogm.datastore.mongodb.type.impl.ObjectIdGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.StringAsObjectIdGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.StringAsObjectIdType;
import org.hibernate.ogm.dialect.batch.spi.BatchableGridDialect;
import org.hibernate.ogm.dialect.batch.spi.GroupedChangesToEntityOperation;
import org.hibernate.ogm.dialect.batch.spi.InsertOrUpdateAssociationOperation;
import org.hibernate.ogm.dialect.batch.spi.InsertOrUpdateTupleOperation;
import org.hibernate.ogm.dialect.batch.spi.Operation;
import org.hibernate.ogm.dialect.batch.spi.OperationsQueue;
import org.hibernate.ogm.dialect.batch.spi.RemoveAssociationOperation;
import org.hibernate.ogm.dialect.batch.spi.RemoveTupleOperation;
import org.hibernate.ogm.dialect.identity.spi.IdentityColumnAwareGridDialect;
import org.hibernate.ogm.dialect.multiget.spi.MultigetGridDialect;
import org.hibernate.ogm.dialect.optimisticlock.spi.OptimisticLockingAwareGridDialect;
import org.hibernate.ogm.dialect.query.spi.BackendQuery;
import org.hibernate.ogm.dialect.query.spi.ClosableIterator;
import org.hibernate.ogm.dialect.query.spi.NoOpParameterMetadataBuilder;
import org.hibernate.ogm.dialect.query.spi.ParameterMetadataBuilder;
import org.hibernate.ogm.dialect.query.spi.QueryParameters;
import org.hibernate.ogm.dialect.query.spi.QueryableGridDialect;
import org.hibernate.ogm.dialect.spi.AssociationContext;
import org.hibernate.ogm.dialect.spi.AssociationTypeContext;
import org.hibernate.ogm.dialect.spi.BaseGridDialect;
import org.hibernate.ogm.dialect.spi.DuplicateInsertPreventionStrategy;
import org.hibernate.ogm.dialect.spi.ModelConsumer;
import org.hibernate.ogm.dialect.spi.NextValueRequest;
import org.hibernate.ogm.dialect.spi.OperationContext;
import org.hibernate.ogm.dialect.spi.TransactionContext;
import org.hibernate.ogm.dialect.spi.TupleAlreadyExistsException;
import org.hibernate.ogm.dialect.spi.TupleContext;
import org.hibernate.ogm.dialect.spi.TuplesSupplier;
import org.hibernate.ogm.dialect.spi.TupleTypeContext;
import org.hibernate.ogm.entityentry.impl.TuplePointer;
import org.hibernate.ogm.model.key.spi.AssociationKey;
import org.hibernate.ogm.model.key.spi.AssociationKeyMetadata;
import org.hibernate.ogm.model.key.spi.AssociationKind;
import org.hibernate.ogm.model.key.spi.AssociationType;
import org.hibernate.ogm.model.key.spi.EntityKey;
import org.hibernate.ogm.model.key.spi.EntityKeyMetadata;
import org.hibernate.ogm.model.key.spi.IdSourceKey;
import org.hibernate.ogm.model.key.spi.RowKey;
import org.hibernate.ogm.model.spi.Association;
import org.hibernate.ogm.model.spi.Tuple;
import org.hibernate.ogm.model.spi.Tuple.SnapshotType;
import org.hibernate.ogm.model.spi.TupleOperation;
import org.hibernate.ogm.type.impl.ByteStringType;
import org.hibernate.ogm.type.impl.CharacterStringType;
import org.hibernate.ogm.type.impl.StringCalendarDateType;
import org.hibernate.ogm.type.spi.GridType;
import org.hibernate.ogm.util.impl.CollectionHelper;
import org.hibernate.ogm.util.impl.StringHelper;
import org.hibernate.type.StandardBasicTypes;
import org.hibernate.type.Type;
import org.parboiled.Parboiled;
import org.parboiled.errors.ErrorUtils;
import org.parboiled.parserunners.RecoveringParseRunner;
import org.parboiled.support.ParsingResult;
import com.mongodb.AggregationOutput;
import com.mongodb.BasicDBObject;
import com.mongodb.BulkWriteOperation;
import com.mongodb.BulkWriteResult;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.DuplicateKeyException;
import com.mongodb.ReadPreference;
import com.mongodb.WriteConcern;
import com.mongodb.WriteResult;
/**
* Each Tuple entry is stored as a property in a MongoDB document.
*
* Each association is stored in an association document containing three properties:
* - the association table name (optionally)
* - the RowKey column names and values
* - the tuples as an array of elements
*
* Associations can be stored as:
* - one MongoDB collection per association class. The collection name is prefixed.
* - one MongoDB collection for all associations (the association table name property is then used)
* - embedded in the owning entity document (planned but not supported at the moment, OGM-177)
*
* Collections of embeddables are stored within the owning entity document under the
* unqualified collection role.
*
* In MongoDB it is possible to batch operations, but only for the creation of new documents
* and only if they don't have invalid characters in the field names.
* If these conditions are not met, the MongoDB mechanism for batch operations
* is not used.
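*
* As a rough illustration only (the exact layout is assumed here from the {@code _id}, {@code table}
* and {@code rows} field names used by this dialect), an association stored in the global
* association collection might look like:
*
* <pre>
* {
*   "_id"   : { "owner_id" : 123 },
*   "table" : "Project_Member",
*   "rows"  : [ { "member_id" : 456 }, { "member_id" : 789 } ]
* }
* </pre>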
*
* @author Guillaume Scheibel <[email protected]>
* @author Alan Fitton <alan at eth0.org.uk>
* @author Emmanuel Bernard <[email protected]>
* @author Thorsten Möller <[email protected]>
* @author Guillaume Smet
*/
public class MongoDBDialect extends BaseGridDialect implements QueryableGridDialect<MongoDBQueryDescriptor>, BatchableGridDialect, IdentityColumnAwareGridDialect, MultigetGridDialect, OptimisticLockingAwareGridDialect {
public static final String ID_FIELDNAME = "_id";
public static final String PROPERTY_SEPARATOR = ".";
public static final String ROWS_FIELDNAME = "rows";
public static final String TABLE_FIELDNAME = "table";
public static final String ASSOCIATIONS_COLLECTION_PREFIX = "associations_";
private static final Log log = LoggerFactory.getLogger();
private static final List<String> ROWS_FIELDNAME_LIST = Collections.singletonList( ROWS_FIELDNAME );
/**
* Pattern used to recognize a constraint violation on the primary key.
*
* MongoDB returns an exception with {@code .$_id_ } or {@code _id_ } while Fongo returns an exception with {@code ._id }
*/
private static final Pattern PRIMARY_KEY_CONSTRAINT_VIOLATION_MESSAGE = Pattern.compile( ".*[. ]\\$?_id_? .*" );
private final MongoDBDatastoreProvider provider;
private final DB currentDB;
public MongoDBDialect(MongoDBDatastoreProvider provider) {
this.provider = provider;
this.currentDB = this.provider.getDatabase();
}
@Override
public Tuple getTuple(EntityKey key, OperationContext operationContext) {
DBObject found = this.getObject( key, operationContext );
return createTuple( key, operationContext, found );
}
@Override
public List<Tuple> getTuples(EntityKey[] keys, TupleContext tupleContext) {
if ( keys.length == 0 ) {
return Collections.emptyList();
}
Object[] searchObjects = new Object[keys.length];
for ( int i = 0; i < keys.length; i++ ) {
searchObjects[i] = prepareIdObjectValue( keys[i].getColumnNames(), keys[i].getColumnValues() );
}
DBCursor cursor = this.getObjects( keys[0].getMetadata(), searchObjects, tupleContext );
try {
return tuplesResult( keys, searchObjects, tupleContext, cursor );
}
finally {
if ( cursor != null ) {
cursor.close();
}
}
}
/*
* This method assumes that the entries in the cursor might not be in the same order as the keys and some keys might
* not have a matching result in the db.
*/
private static List<Tuple> tuplesResult(EntityKey[] keys, Object[] searchObjects, TupleContext tupleContext, DBCursor cursor) {
// The list is initialized with null because some keys might not have a corresponding value in the cursor
Tuple[] tuples = new Tuple[searchObjects.length];
for ( DBObject dbObject : cursor ) {
for ( int i = 0; i < searchObjects.length; i++ ) {
if ( dbObject.get( ID_FIELDNAME ).equals( searchObjects[i] ) ) {
tuples[i] = createTuple( keys[i], tupleContext, dbObject );
// We assume there are no duplicated keys
break;
}
}
}
return Arrays.asList( tuples );
}
private static Tuple createTuple(EntityKey key, OperationContext operationContext, DBObject found) {
if ( found != null ) {
return new Tuple( new MongoDBTupleSnapshot( found, key.getMetadata() ), SnapshotType.UPDATE );
}
else if ( isInTheInsertionQueue( key, operationContext ) ) {
// The key has not been inserted in the db but it is in the queue
return new Tuple( new MongoDBTupleSnapshot( prepareIdObject( key ), key.getMetadata() ), SnapshotType.INSERT );
}
else {
return null;
}
}
@Override
public Tuple createTuple(EntityKeyMetadata entityKeyMetadata, OperationContext operationContext) {
return new Tuple( new MongoDBTupleSnapshot( new BasicDBObject(), entityKeyMetadata ), SnapshotType.INSERT );
}
@Override
public Tuple createTuple(EntityKey key, OperationContext operationContext) {
DBObject toSave = prepareIdObject( key );
return new Tuple( new MongoDBTupleSnapshot( toSave, key.getMetadata() ), SnapshotType.INSERT );
}
/**
* Returns a {@link DBObject} representing the entity which embeds the specified association.
*/
private DBObject getEmbeddingEntity(AssociationKey key, AssociationContext associationContext) {
DBObject embeddingEntityDocument = associationContext.getEntityTuplePointer().getTuple() != null ?
( (MongoDBTupleSnapshot) associationContext.getEntityTuplePointer().getTuple().getSnapshot() ).getDbObject() : null;
if ( embeddingEntityDocument != null ) {
return embeddingEntityDocument;
}
else {
ReadPreference readPreference = getReadPreference( associationContext );
DBCollection collection = getCollection( key.getEntityKey() );
DBObject searchObject = prepareIdObject( key.getEntityKey() );
DBObject projection = getProjection( key, true );
return collection.findOne( searchObject, projection, readPreference );
}
}
private DBObject getObject(EntityKey key, OperationContext operationContext) {
ReadPreference readPreference = getReadPreference( operationContext );
DBCollection collection = getCollection( key );
DBObject searchObject = prepareIdObject( key );
BasicDBObject projection = getProjection( operationContext );
return collection.findOne( searchObject, projection, readPreference );
}
private DBCursor getObjects(EntityKeyMetadata entityKeyMetadata, Object[] searchObjects, TupleContext tupleContext) {
ReadPreference readPreference = getReadPreference( tupleContext );
DBCollection collection = getCollection( entityKeyMetadata );
collection.setReadPreference( readPreference );
BasicDBObject projection = getProjection( tupleContext );
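// fetch all requested documents in a single round trip: { _id: { $in: [ id1, id2, ... ] } }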
DBObject query = new BasicDBObject();
query.put( ID_FIELDNAME, new BasicDBObject( "$in", searchObjects ) );
return collection.find( query, projection );
}
private static BasicDBObject getProjection(OperationContext operationContext) {
return getProjection( operationContext.getTupleTypeContext().getSelectableColumns() );
}
/**
* Returns a projection object for specifying the fields to retrieve during a specific find operation.
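* <p>
* For illustration (column names assumed), selecting the columns {@code name} and {@code age}
* results in the projection {@code { "name" : 1, "age" : 1 }}.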
*/
private static BasicDBObject getProjection(List<String> fieldNames) {
BasicDBObject projection = new BasicDBObject( fieldNames.size() );
for ( String column : fieldNames ) {
projection.put( column, 1 );
}
return projection;
}
/**
* Create a DBObject which represents the _id field.
* In case of simple id objects the json representation will look like {_id: "theIdValue"}
* In case of composite id objects the json representation will look like {_id: {author: "Guillaume", title: "What this method is used for?"}}
*
* @param key
*
* @return the DBObject which represents the id field
*/
private static BasicDBObject prepareIdObject(EntityKey key) {
return prepareIdObject( key.getColumnNames(), key.getColumnValues() );
}
private static BasicDBObject prepareIdObject(IdSourceKey key) {
return prepareIdObject( key.getColumnName(), key.getColumnValue() );
}
private static BasicDBObject prepareIdObject(String columnName, String columnValue) {
return new BasicDBObject( ID_FIELDNAME, prepareIdObjectValue( columnName, columnValue ) );
}
private static BasicDBObject prepareIdObject(String[] columnNames, Object[] columnValues) {
return new BasicDBObject( ID_FIELDNAME, prepareIdObjectValue( columnNames, columnValues ) );
}
private static Object prepareIdObjectValue(String columnName, String columnValue) {
return columnValue;
}
private static Object prepareIdObjectValue(String[] columnNames, Object[] columnValues) {
if ( columnNames.length == 1 ) {
return columnValues[0];
}
else {
DBObject idObject = new BasicDBObject();
for ( int i = 0; i < columnNames.length; i++ ) {
String columnName = columnNames[i];
Object columnValue = columnValues[i];
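// composite id columns may use dotted names such as "id.author"; only the part after the
// first dot is used as the field name inside the embedded _id document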
if ( columnName.contains( PROPERTY_SEPARATOR ) ) {
int dotIndex = columnName.indexOf( PROPERTY_SEPARATOR );
String shortColumnName = columnName.substring( dotIndex + 1 );
idObject.put( shortColumnName, columnValue );
}
else {
idObject.put( columnNames[i], columnValue );
}
}
return idObject;
}
}
private DBCollection getCollection(String table) {
return currentDB.getCollection( table );
}
private DBCollection getCollection(EntityKey key) {
return getCollection( key.getTable() );
}
private DBCollection getCollection(EntityKeyMetadata entityKeyMetadata) {
return getCollection( entityKeyMetadata.getTable() );
}
private DBCollection getAssociationCollection(AssociationKey key, AssociationStorageStrategy storageStrategy) {
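// the global strategy stores every association in one shared collection, while the
// per-association-table strategy uses one collection per table, prefixed with "associations_"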
if ( storageStrategy == AssociationStorageStrategy.GLOBAL_COLLECTION ) {
return getCollection( MongoDBConfiguration.DEFAULT_ASSOCIATION_STORE );
}
else {
return getCollection( ASSOCIATIONS_COLLECTION_PREFIX + key.getTable() );
}
}
private static BasicDBObject getSubQuery(String operator, BasicDBObject query) {
return query.get( operator ) != null ? (BasicDBObject) query.get( operator ) : new BasicDBObject();
}
private static void addSubQuery(String operator, BasicDBObject query, String column, Object value) {
BasicDBObject subQuery = getSubQuery( operator, query );
query.append( operator, subQuery.append( column, value ) );
}
private static void addSetToQuery(BasicDBObject query, String column, Object value) {
removeSubQuery( "$unset", query, column );
addSubQuery( "$set", query, column, value );
}
private static void addUnsetToQuery(BasicDBObject query, String column) {
removeSubQuery( "$set", query, column );
addSubQuery( "$unset", query, column, Integer.valueOf( 1 ) );
}
private static void removeSubQuery(String operator, BasicDBObject query, String column) {
BasicDBObject subQuery = getSubQuery( operator, query );
subQuery.removeField( column );
if ( subQuery.isEmpty() ) {
query.removeField( operator );
}
}
@Override
public void insertOrUpdateTuple(EntityKey key, TuplePointer tuplePointer, TupleContext tupleContext) {
throw new UnsupportedOperationException( "Method not supported in GridDialect anymore" );
}
@Override
//TODO deal with dotted column names once this method is used for ALL / Dirty optimistic locking
public boolean updateTupleWithOptimisticLock(EntityKey entityKey, Tuple oldLockState, Tuple tuple, TupleContext tupleContext) {
BasicDBObject idObject = prepareIdObject( entityKey );
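// add the old version/lock column values to the id query so that the findAndModify below only
// matches the document if it has not been modified concurrently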
for ( String versionColumn : oldLockState.getColumnNames() ) {
idObject.put( versionColumn, oldLockState.get( versionColumn ) );
}
BasicDBObject updater = objectForUpdate( tuple, tupleContext );
if ( updater.isEmpty() ) {
return false;
}
DBObject doc = getCollection( entityKey ).findAndModify( idObject, updater );
return doc != null;
}
@Override
public void insertTuple(EntityKeyMetadata entityKeyMetadata, Tuple tuple, TupleContext tupleContext) {
WriteConcern writeConcern = getWriteConcern( tupleContext );
DBObject objectWithId = insertDBObject( entityKeyMetadata, tuple, writeConcern );
String idColumnName = entityKeyMetadata.getColumnNames()[0];
tuple.put( idColumnName, objectWithId.get( ID_FIELDNAME ) );
}
/*
* Insert the tuple and return an object containing the id in the field ID_FIELDNAME
*/
private DBObject insertDBObject(EntityKeyMetadata entityKeyMetadata, Tuple tuple, WriteConcern writeConcern) {
DBObject dbObject = objectForInsert( tuple, ( (MongoDBTupleSnapshot) tuple.getSnapshot() ).getDbObject() );
getCollection( entityKeyMetadata ).insert( dbObject, writeConcern );
return dbObject;
}
/**
* Creates a DBObject that can be passed to the MongoDB batch insert function
*/
private static DBObject objectForInsert(Tuple tuple, DBObject dbObject) {
MongoDBTupleSnapshot snapshot = (MongoDBTupleSnapshot) tuple.getSnapshot();
for ( TupleOperation operation : tuple.getOperations() ) {
String column = operation.getColumn();
if ( notInIdField( snapshot, column ) ) {
switch ( operation.getType() ) {
case PUT:
MongoHelpers.setValue( dbObject, column, operation.getValue() );
break;
case PUT_NULL:
case REMOVE:
MongoHelpers.resetValue( dbObject, column );
break;
}
}
}
return dbObject;
}
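/**
* Builds the MongoDB update statement for the dirty columns of the given tuple. As a rough
* illustration (column names assumed), the result might look like
* {@code { "$set" : { "name" : "Bob" }, "$unset" : { "nickName" : 1 } }}.
*/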
private static BasicDBObject objectForUpdate(Tuple tuple, TupleContext tupleContext) {
return objectForUpdate( tuple, tupleContext, new BasicDBObject() );
}
private static BasicDBObject objectForUpdate(Tuple tuple, TupleContext tupleContext, BasicDBObject updateStatement) {
MongoDBTupleSnapshot snapshot = (MongoDBTupleSnapshot) tuple.getSnapshot();
EmbeddableStateFinder embeddableStateFinder = new EmbeddableStateFinder( tuple, tupleContext );
Set<String> nullEmbeddables = new HashSet<>();
for ( TupleOperation operation : tuple.getOperations() ) {
String column = operation.getColumn();
if ( notInIdField( snapshot, column ) ) {
switch ( operation.getType() ) {
case PUT:
addSetToQuery( updateStatement, column, operation.getValue() );
break;
case PUT_NULL:
case REMOVE:
// try and find if this column is within an embeddable and if that embeddable is null
// if true, unset the full embeddable
String nullEmbeddable = embeddableStateFinder.getOuterMostNullEmbeddableIfAny( column );
if ( nullEmbeddable != null ) {
// we have a null embeddable
if ( ! nullEmbeddables.contains( nullEmbeddable ) ) {
// we have not processed it yet
addUnsetToQuery( updateStatement, nullEmbeddable );
nullEmbeddables.add( nullEmbeddable );
}
}
else {
// simply unset the column
addUnsetToQuery( updateStatement, column );
}
break;
}
}
}
return updateStatement;
}
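// the _id field and the entity key columns are skipped when building insert/update statements,
// since MongoDB does not allow the _id of an existing document to be modified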
private static boolean notInIdField(MongoDBTupleSnapshot snapshot, String column) {
return !column.equals( ID_FIELDNAME ) && !column.endsWith( PROPERTY_SEPARATOR + ID_FIELDNAME ) && !snapshot.isKeyColumn( column );
}
@Override
public void removeTuple(EntityKey key, TupleContext tupleContext) {
DBCollection collection = getCollection( key );
DBObject toDelete = prepareIdObject( key );
WriteConcern writeConcern = getWriteConcern( tupleContext );
collection.remove( toDelete, writeConcern );
}
@Override
public boolean removeTupleWithOptimisticLock(EntityKey entityKey, Tuple oldLockState, TupleContext tupleContext) {
DBObject toDelete = prepareIdObject( entityKey );
for ( String versionColumn : oldLockState.getColumnNames() ) {
toDelete.put( versionColumn, oldLockState.get( versionColumn ) );
}
DBCollection collection = getCollection( entityKey );
DBObject deleted = collection.findAndRemove( toDelete );
return deleted != null;
}
// not used for associations embedded in the owning entity document
private DBObject findAssociation(AssociationKey key, AssociationContext associationContext, AssociationStorageStrategy storageStrategy) {
ReadPreference readPreference = getReadPreference( associationContext );
final DBObject associationKeyObject = associationKeyToObject( key, storageStrategy );
return getAssociationCollection( key, storageStrategy ).findOne( associationKeyObject, getProjection( key, false ), readPreference );
}
private static DBObject getProjection(AssociationKey key, boolean embedded) {
if ( embedded ) {
return getProjection( Collections.singletonList( key.getMetadata().getCollectionRole() ) );
}
else {
return getProjection( ROWS_FIELDNAME_LIST );
}
}
private static boolean isInTheInsertionQueue(EntityKey key, AssociationContext associationContext) {
OperationsQueue queue = associationContext.getOperationsQueue();
return queue != null && queue.isInTheInsertionQueue( key );
}
@Override
public Association getAssociation(AssociationKey key, AssociationContext associationContext) {
AssociationStorageStrategy storageStrategy = getAssociationStorageStrategy( key, associationContext );
if ( isEmbeddedAssociation( key ) && isInTheInsertionQueue( key.getEntityKey(), associationContext ) ) {
// The association is embedded and the owner of the association is in the insertion queue
DBObject idObject = prepareIdObject( key.getEntityKey() );
return new Association( new MongoDBAssociationSnapshot( idObject, key, storageStrategy ) );
}
// We need to execute the previous operations first or it won't be able to find the key that should have
// been created
executeBatch( associationContext.getOperationsQueue() );
if ( storageStrategy == AssociationStorageStrategy.IN_ENTITY ) {
DBObject entity = getEmbeddingEntity( key, associationContext );
if ( entity != null && hasField( entity, key.getMetadata().getCollectionRole() ) ) {
return new Association( new MongoDBAssociationSnapshot( entity, key, storageStrategy ) );
}
else {
return null;
}
}
final DBObject result = findAssociation( key, associationContext, storageStrategy );
if ( result == null ) {
return null;
}
else {
return new Association( new MongoDBAssociationSnapshot( result, key, storageStrategy ) );
}
}
private static boolean isEmbeddedAssociation(AssociationKey key) {
return AssociationKind.EMBEDDED_COLLECTION == key.getMetadata().getAssociationKind();
}
@Override
public Association createAssociation(AssociationKey key, AssociationContext associationContext) {
AssociationStorageStrategy storageStrategy = getAssociationStorageStrategy( key, associationContext );
DBObject document = storageStrategy == AssociationStorageStrategy.IN_ENTITY
? getEmbeddingEntity( key, associationContext )
: associationKeyToObject( key, storageStrategy );
Association association = new Association( new MongoDBAssociationSnapshot( document, key, storageStrategy ) );
// in the case of an association stored in the entity structure, we might end up with rows present in the
// current snapshot of the entity while we want an empty association here. So, in this case, we clear the
// snapshot to be sure the association created is empty.
if ( !association.isEmpty() ) {
association.clear();
}
return association;
}
/**
* Returns the rows of the given association as to be stored in the database. The return value is one of the
* following:
* <ul>
* <li>A list of plain values such as {@code String}s, {@code int}s etc. in case there is exactly one row key column
* which is not part of the association key (in this case we don't need to persist the key name as it can be
* restored from the association key upon loading) or</li>
* <li>A list of {@code DBObject}s with keys/values for all row key columns which are not part of the association
* key</li>
* <li>A {@link DBObject} with a key for each entry in case the given association has exactly one row key column
* which is of type {@code String} (e.g. a hash map) and {@link DocumentStoreProperties#MAP_STORAGE} is not set to
* {@link MapStorageType#AS_LIST}. The map values will either be plain values (in case of single values) or
* another {@code DBObject}.</li>
* </ul>
*/
private static Object getAssociationRows(Association association, AssociationKey key, AssociationContext associationContext) {
boolean organizeByRowKey = DotPatternMapHelpers.organizeAssociationMapByRowKey( association, key, associationContext );
// transform map entries such as ( addressType='home', address_id=123) into the more
// natural ( { 'home' = 123 } ) form
if ( organizeByRowKey ) {
String rowKeyColumn = key.getMetadata().getRowKeyIndexColumnNames()[0];
DBObject rows = new BasicDBObject();
for ( RowKey rowKey : association.getKeys() ) {
DBObject row = (DBObject) getAssociationRow( association.get( rowKey ), key );
String rowKeyValue = (String) row.removeField( rowKeyColumn );
// if there is a single column on the value side left, unwrap it
if ( row.keySet().size() == 1 ) {
rows.put( rowKeyValue, row.toMap().values().iterator().next() );
}
else {
rows.put( rowKeyValue, row );
}
}
return rows;
}
// non-map rows can be taken as is
else {
List