/*
* Hibernate OGM, Domain model persistence for NoSQL datastores
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.ogm.datastore.mongodb;
import static java.lang.Boolean.FALSE;
import static org.hibernate.ogm.datastore.document.impl.DotPatternMapHelpers.getColumnSharedPrefixOfAssociatedEntityLink;
import static org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoHelpers.hasField;
import java.lang.invoke.MethodHandles;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.bson.BsonDocument;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.hibernate.AssertionFailure;
import org.hibernate.ogm.datastore.document.association.impl.DocumentHelpers;
import org.hibernate.ogm.datastore.document.cfg.DocumentStoreProperties;
import org.hibernate.ogm.datastore.document.impl.DotPatternMapHelpers;
import org.hibernate.ogm.datastore.document.impl.EmbeddableStateFinder;
import org.hibernate.ogm.datastore.document.options.AssociationStorageType;
import org.hibernate.ogm.datastore.document.options.MapStorageType;
import org.hibernate.ogm.datastore.document.options.spi.AssociationStorageOption;
import org.hibernate.ogm.datastore.map.impl.MapTupleSnapshot;
import org.hibernate.ogm.datastore.mongodb.binarystorage.GridFSStorageManager;
import org.hibernate.ogm.datastore.mongodb.configuration.impl.MongoDBConfiguration;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.AssociationStorageStrategy;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoDBAssociationSnapshot;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoDBTupleSnapshot;
import org.hibernate.ogm.datastore.mongodb.dialect.impl.MongoHelpers;
import org.hibernate.ogm.datastore.mongodb.impl.MongoDBDatastoreProvider;
import org.hibernate.ogm.datastore.mongodb.logging.impl.Log;
import org.hibernate.ogm.datastore.mongodb.logging.impl.LoggerFactory;
import org.hibernate.ogm.datastore.mongodb.options.AssociationDocumentStorageType;
import org.hibernate.ogm.datastore.mongodb.options.impl.AssociationDocumentStorageOption;
import org.hibernate.ogm.datastore.mongodb.options.impl.ReadConcernOption;
import org.hibernate.ogm.datastore.mongodb.options.impl.ReadPreferenceOption;
import org.hibernate.ogm.datastore.mongodb.options.impl.WriteConcernOption;
import org.hibernate.ogm.datastore.mongodb.query.impl.MongoDBQueryDescriptor;
import org.hibernate.ogm.datastore.mongodb.query.parsing.nativequery.impl.MongoDBQueryDescriptorBuilder;
import org.hibernate.ogm.datastore.mongodb.query.parsing.nativequery.impl.NativeQueryParser;
import org.hibernate.ogm.datastore.mongodb.type.GeoCollection;
import org.hibernate.ogm.datastore.mongodb.type.GeoLineString;
import org.hibernate.ogm.datastore.mongodb.type.GeoMultiLineString;
import org.hibernate.ogm.datastore.mongodb.type.GeoMultiPoint;
import org.hibernate.ogm.datastore.mongodb.type.GeoMultiPolygon;
import org.hibernate.ogm.datastore.mongodb.type.GeoPoint;
import org.hibernate.ogm.datastore.mongodb.type.GeoPolygon;
import org.hibernate.ogm.datastore.mongodb.type.impl.BinaryAsBsonBinaryGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoCollectionGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoLineStringGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoMultiLineStringGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoMultiPointGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoMultiPolygonGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoPointGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GeoPolygonGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GridFSGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.GridFSType;
import org.hibernate.ogm.datastore.mongodb.type.impl.ObjectIdGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.SerializableAsBinaryGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.StringAsObjectIdGridType;
import org.hibernate.ogm.datastore.mongodb.type.impl.StringAsObjectIdType;
import org.hibernate.ogm.datastore.mongodb.utils.DocumentUtil;
import org.hibernate.ogm.dialect.batch.spi.BatchableGridDialect;
import org.hibernate.ogm.dialect.batch.spi.GroupedChangesToEntityOperation;
import org.hibernate.ogm.dialect.batch.spi.InsertOrUpdateAssociationOperation;
import org.hibernate.ogm.dialect.batch.spi.InsertOrUpdateTupleOperation;
import org.hibernate.ogm.dialect.batch.spi.Operation;
import org.hibernate.ogm.dialect.batch.spi.OperationsQueue;
import org.hibernate.ogm.dialect.batch.spi.RemoveAssociationOperation;
import org.hibernate.ogm.dialect.batch.spi.RemoveTupleOperation;
import org.hibernate.ogm.dialect.identity.spi.IdentityColumnAwareGridDialect;
import org.hibernate.ogm.dialect.multiget.spi.MultigetGridDialect;
import org.hibernate.ogm.dialect.optimisticlock.spi.OptimisticLockingAwareGridDialect;
import org.hibernate.ogm.dialect.query.spi.BackendQuery;
import org.hibernate.ogm.dialect.query.spi.ClosableIterator;
import org.hibernate.ogm.dialect.query.spi.NoOpParameterMetadataBuilder;
import org.hibernate.ogm.dialect.query.spi.ParameterMetadataBuilder;
import org.hibernate.ogm.dialect.query.spi.QueryParameters;
import org.hibernate.ogm.dialect.query.spi.QueryableGridDialect;
import org.hibernate.ogm.dialect.spi.AssociationContext;
import org.hibernate.ogm.dialect.spi.AssociationTypeContext;
import org.hibernate.ogm.dialect.spi.BaseGridDialect;
import org.hibernate.ogm.dialect.spi.DuplicateInsertPreventionStrategy;
import org.hibernate.ogm.dialect.spi.ModelConsumer;
import org.hibernate.ogm.dialect.spi.NextValueRequest;
import org.hibernate.ogm.dialect.spi.OperationContext;
import org.hibernate.ogm.dialect.spi.TransactionContext;
import org.hibernate.ogm.dialect.spi.TupleAlreadyExistsException;
import org.hibernate.ogm.dialect.spi.TupleContext;
import org.hibernate.ogm.dialect.spi.TupleTypeContext;
import org.hibernate.ogm.dialect.spi.TuplesSupplier;
import org.hibernate.ogm.dialect.storedprocedure.spi.StoredProcedureAwareGridDialect;
import org.hibernate.ogm.entityentry.impl.TuplePointer;
import org.hibernate.ogm.model.key.spi.AssociationKey;
import org.hibernate.ogm.model.key.spi.AssociationKeyMetadata;
import org.hibernate.ogm.model.key.spi.AssociationKind;
import org.hibernate.ogm.model.key.spi.AssociationType;
import org.hibernate.ogm.model.key.spi.EntityKey;
import org.hibernate.ogm.model.key.spi.EntityKeyMetadata;
import org.hibernate.ogm.model.key.spi.IdSourceKey;
import org.hibernate.ogm.model.key.spi.RowKey;
import org.hibernate.ogm.model.spi.Association;
import org.hibernate.ogm.model.spi.Tuple;
import org.hibernate.ogm.model.spi.Tuple.SnapshotType;
import org.hibernate.ogm.model.spi.TupleOperation;
import org.hibernate.ogm.options.spi.OptionsContext;
import org.hibernate.ogm.storedprocedure.ProcedureQueryParameters;
import org.hibernate.ogm.type.impl.ByteStringType;
import org.hibernate.ogm.type.impl.CharacterStringType;
import org.hibernate.ogm.type.impl.LocalDateAsStringType;
import org.hibernate.ogm.type.impl.LocalDateTimeAsStringType;
import org.hibernate.ogm.type.impl.LocalTimeAsStringType;
import org.hibernate.ogm.type.impl.StringCalendarDateType;
import org.hibernate.ogm.type.impl.TimestampAsDateType;
import org.hibernate.ogm.type.spi.GridType;
import org.hibernate.ogm.util.impl.CollectionHelper;
import org.hibernate.ogm.util.impl.StringHelper;
import org.hibernate.type.MaterializedBlobType;
import org.hibernate.type.SerializableToBlobType;
import org.hibernate.type.StandardBasicTypes;
import org.hibernate.type.Type;
import org.parboiled.Parboiled;
import org.parboiled.errors.ErrorUtils;
import org.parboiled.parserunners.RecoveringParseRunner;
import org.parboiled.support.ParsingResult;
import com.mongodb.DuplicateKeyException;
import com.mongodb.MongoBulkWriteException;
import com.mongodb.MongoCommandException;
import com.mongodb.ReadConcern;
import com.mongodb.ReadPreference;
import com.mongodb.WriteConcern;
import com.mongodb.bulk.BulkWriteResult;
import com.mongodb.client.AggregateIterable;
import com.mongodb.client.DistinctIterable;
import com.mongodb.client.FindIterable;
import com.mongodb.client.MapReduceIterable;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.BulkWriteOptions;
import com.mongodb.client.model.Collation;
import com.mongodb.client.model.CollationAlternate;
import com.mongodb.client.model.CollationCaseFirst;
import com.mongodb.client.model.CollationMaxVariable;
import com.mongodb.client.model.CollationStrength;
import com.mongodb.client.model.FindOneAndDeleteOptions;
import com.mongodb.client.model.FindOneAndUpdateOptions;
import com.mongodb.client.model.InsertOneModel;
import com.mongodb.client.model.MapReduceAction;
import com.mongodb.client.model.ReplaceOptions;
import com.mongodb.client.model.ReturnDocument;
import com.mongodb.client.model.UpdateOptions;
import com.mongodb.client.result.DeleteResult;
import com.mongodb.client.result.UpdateResult;
/**
* Each Tuple entry is stored as a property in a MongoDB document.
*
* Each association is stored in an association document containing three properties:
* - the association table name (optionally)
* - the RowKey column names and values
* - the tuples as an array of elements
*
* Associations can be stored as:
* - one MongoDB collection per association class. The collection name is prefixed.
* - one MongoDB collection for all associations (the association table name property in then used)
* - embed the collection info in the owning entity document is planned but not supported at the moment (OGM-177)
*
* Collection of embeddable are stored within the owning entity document under the
* unqualified collection role
*
* In MongoDB it is possible to batch operations, but only for the creation of new documents
* and only if they don't have invalid characters in the field name.
* If these conditions are not met, the MongoDB mechanism for batch operations
* is not going to be used.
*
* @author Guillaume Scheibel <[email protected]>
* @author Alan Fitton <alan at eth0.org.uk>
* @author Emmanuel Bernard <[email protected]>
* @author Thorsten Möller <[email protected]>
* @author Guillaume Smet
* @author Aleksandr Mylnikov
*/
public class MongoDBDialect extends BaseGridDialect implements QueryableGridDialect, BatchableGridDialect, IdentityColumnAwareGridDialect, MultigetGridDialect, OptimisticLockingAwareGridDialect,
StoredProcedureAwareGridDialect {
/** Name of the document field holding the (possibly compound) identifier. */
public static final String ID_FIELDNAME = "_id";
/** Separator used in dotted navigation paths into embedded documents. */
public static final String PROPERTY_SEPARATOR = ".";
/** Field of an association document holding the association rows. */
public static final String ROWS_FIELDNAME = "rows";
/** Field of a global-collection association document holding the association table name. */
public static final String TABLE_FIELDNAME = "table";
/** Prefix of the per-association collections. */
public static final String ASSOCIATIONS_COLLECTION_PREFIX = "associations_";
private static final Log log = LoggerFactory.make( MethodHandles.lookup() );
/**
 * Parser instance used for native queries.
 * This is not thread-safe!
 */
private static final NativeQueryParser NATIVE_QUERY_PARSER = Parboiled.createParser( NativeQueryParser.class );
private static final List<String> ROWS_FIELDNAME_LIST = Collections.singletonList( ROWS_FIELDNAME );
// matches a non-negative number with an optional decimal part (no leading '-' is accepted)
private static final Pattern NUMBER_PATTERN = Pattern.compile( "\\d+(\\.\\d+)?" );
/**
 * Pattern used to recognize a constraint violation on the primary key.
 *
 * MongoDB returns an exception with {@code .$_id_ } or {@code _id_ } while Fongo returns an exception with {@code ._id }
 */
private static final Pattern PRIMARY_KEY_CONSTRAINT_VIOLATION_MESSAGE = Pattern.compile( ".*[. ]\\$?_id_? .*" );
private final MongoDBDatastoreProvider provider;
private final MongoDatabase currentDB;
/**
 * Creates the dialect, caching the {@link MongoDatabase} obtained from the datastore provider.
 *
 * @param provider provider giving access to the MongoDB client and database
 */
public MongoDBDialect(MongoDBDatastoreProvider provider) {
this.provider = provider;
this.currentDB = this.provider.getDatabase();
}
@Override
public Tuple getTuple(EntityKey key, OperationContext operationContext) {
    // Look the document up by id, then wrap it (or an insertion-queue placeholder) in a Tuple
    return createTuple( key, operationContext, this.getObject( key, operationContext ) );
}
/**
 * Loads the documents of all given keys with a single {@code $in} query.
 *
 * @return a list aligned with {@code keys}; entries without a matching document are {@code null}
 */
@Override
public List<Tuple> getTuples(EntityKey[] keys, TupleContext tupleContext) {
    if ( keys.length == 0 ) {
        return Collections.emptyList();
    }
    // Convert each key into the value stored under _id so a single $in query can fetch them all
    Object[] searchObjects = new Object[keys.length];
    for ( int i = 0; i < keys.length; i++ ) {
        searchObjects[i] = prepareIdObjectValue( keys[i].getColumnNames(), keys[i].getColumnValues() );
    }
    // All keys are assumed to share the same metadata, so the first key determines the collection
    MongoCursor<Document> cursor = this.getObjects( keys[0].getMetadata(), searchObjects, tupleContext );
    try {
        return tuplesResult( keys, searchObjects, tupleContext, cursor );
    }
    finally {
        if ( cursor != null ) {
            cursor.close();
        }
    }
}
/*
 * This method assumes that the entries in the cursor might not be in the same order as the keys and some keys might
 * not have a matching result in the db.
 */
private static List<Tuple> tuplesResult(EntityKey[] keys, Object[] searchObjects, TupleContext tupleContext, MongoCursor<Document> cursor) {
    // The array starts out filled with nulls because some keys might not have a corresponding document
    Tuple[] tuples = new Tuple[searchObjects.length];
    while ( cursor.hasNext() ) {
        Document document = cursor.next();
        // Match each document to its key by comparing the _id value
        for ( int i = 0; i < searchObjects.length; i++ ) {
            if ( document.get( ID_FIELDNAME ).equals( searchObjects[i] ) ) {
                tuples[i] = createTuple( keys[i], tupleContext, document );
                // We assume there are no duplicated keys
                break;
            }
        }
    }
    return Arrays.asList( tuples );
}
/**
 * Wraps the loaded document in a {@link Tuple}; when the document is absent but an insert for the
 * key is pending in the queue, a transient INSERT tuple is built from the id; otherwise {@code null}.
 */
private static Tuple createTuple(EntityKey key, OperationContext operationContext, Document found) {
    if ( found != null ) {
        return new Tuple( new MongoDBTupleSnapshot( found, key.getMetadata() ), SnapshotType.UPDATE );
    }
    if ( isInTheInsertionQueue( key, operationContext ) ) {
        // The key has not been inserted in the db yet but an insert operation is queued for it
        return new Tuple( new MongoDBTupleSnapshot( prepareIdObject( key ), key.getMetadata() ), SnapshotType.INSERT );
    }
    return null;
}
@Override
public Tuple createTuple(EntityKeyMetadata entityKeyMetadata, OperationContext operationContext) {
    // A brand-new entity starts from an empty document; the id is assigned later (identity-column case)
    Document freshDocument = new Document();
    return new Tuple( new MongoDBTupleSnapshot( freshDocument, entityKeyMetadata ), SnapshotType.INSERT );
}
/**
 * Creates a transient tuple for a new entity, seeded with its {@code _id} so subsequent
 * operations can locate the document.
 */
@Override
public Tuple createTuple(EntityKey key, OperationContext operationContext) {
    // Fix: the parameter was named "OperationContext", shadowing the type name
    Document toSave = prepareIdObject( key );
    return new Tuple( new MongoDBTupleSnapshot( toSave, key.getMetadata() ), SnapshotType.INSERT );
}
/**
 * Returns a {@link Document} representing the entity which embeds the specified association.
 * Prefers the in-memory snapshot attached to the association context and only falls back to
 * loading the owning entity from the datastore, projected down to the association field.
 */
private Document getEmbeddingEntity(AssociationKey key, AssociationContext associationContext) {
    Document embeddingEntityDocument = associationContext.getEntityTuplePointer().getTuple() != null
            ? ( (MongoDBTupleSnapshot) associationContext.getEntityTuplePointer().getTuple().getSnapshot() ).getDbObject()
            : null;
    if ( embeddingEntityDocument != null ) {
        return embeddingEntityDocument;
    }
    // Restored the Document type argument: with a raw MongoCollection, first() returns Object
    MongoCollection<Document> collection = getCollection( key.getEntityKey(), associationContext.getAssociationTypeContext().getOptionsContext() );
    Document searchObject = prepareIdObject( key.getEntityKey() );
    Document projection = getProjection( key, true );
    return collection.find( searchObject ).projection( projection ).first();
}
/**
 * Loads the document for the given entity key, projected to the selectable/polymorphic columns,
 * then resolves any GridFS-backed binary content into it. Returns {@code null} when absent.
 */
private Document getObject(EntityKey key, OperationContext operationContext) {
    MongoCollection<Document> collection = getCollection( key, operationContext.getTupleTypeContext().getOptionsContext() );
    Document searchObject = prepareIdObject( key );
    Document projection = getProjection( operationContext );
    FindIterable<Document> fi = collection.find( searchObject );
    Document targetDocument = fi != null ? fi.projection( projection ).first() : null;
    // Binary fields stored through GridFS are not part of the document itself and must be loaded separately
    provider.getBinaryStorageManager().loadContentFromBinaryStorage( targetDocument, key.getMetadata() );
    return targetDocument;
}
/**
 * Issues a single {@code $in} query fetching all documents whose {@code _id} is among the given values.
 * The caller is responsible for closing the returned cursor.
 */
private MongoCursor<Document> getObjects(EntityKeyMetadata entityKeyMetadata, Object[] searchObjects, TupleContext tupleContext) {
    MongoCollection<Document> collection = getCollection( entityKeyMetadata.getTable(), tupleContext.getTupleTypeContext().getOptionsContext() );
    Document projection = getProjection( tupleContext );
    Document query = new Document();
    query.put( ID_FIELDNAME, new Document( "$in", Arrays.asList( searchObjects ) ) );
    return collection.find( query ).projection( projection ).iterator();
}
/**
 * Builds the projection covering the entity's selectable columns plus the columns needed for
 * polymorphism resolution; a set is used since the two lists may overlap.
 */
private static Document getProjection(OperationContext operationContext) {
    Set<String> columns = new HashSet<>();
    columns.addAll( operationContext.getTupleTypeContext().getPolymorphicEntityColumns() );
    columns.addAll( operationContext.getTupleTypeContext().getSelectableColumns() );
    return getProjection( new ArrayList<>( columns ) );
}
/**
 * Returns a projection object for specifying the fields to retrieve during a specific find operation.
 *
 * @param fieldNames names of the fields to include; each is mapped to {@code 1} (include)
 */
private static Document getProjection(List<String> fieldNames) {
    // Restored the String type argument: iterating a raw List as String does not compile
    Document projection = new Document();
    for ( String column : fieldNames ) {
        projection.put( column, 1 );
    }
    return projection;
}
/**
 * Create a Document which represents the _id field.
 * In case of simple id objects the json representation will look like {_id: "theIdValue"}
 * In case of composite id objects the json representation will look like {_id: {author: "Guillaume", title: "What this method is used for?"}}
 *
 * @param key the entity key providing the id column names and values
 *
 * @return the Document which represents the id field
 */
private static Document prepareIdObject(EntityKey key) {
return prepareIdObject( key.getColumnNames(), key.getColumnValues() );
}
// Builds the _id document for an id-source key (sequence-emulating document).
private static Document prepareIdObject(IdSourceKey key) {
return prepareIdObject( key.getColumnName(), key.getColumnValue() );
}
// Builds {_id: <value>} for a single, named id column.
private static Document prepareIdObject(String columnName, String columnValue) {
return new Document( ID_FIELDNAME, prepareIdObjectValue( columnName, columnValue ) );
}
// Builds {_id: <value-or-embedded-document>} for single- or multi-column ids.
private static Document prepareIdObject(String[] columnNames, Object[] columnValues) {
return new Document( ID_FIELDNAME, prepareIdObjectValue( columnNames, columnValues ) );
}
// Single-column ids are stored as the plain value; the column name is implied by _id.
private static Object prepareIdObjectValue(String columnName, String columnValue) {
return columnValue;
}
/**
 * Computes the value stored under {@code _id}: the bare column value for single-column ids,
 * or an embedded document keyed by the column names (with any leading "prefix." stripped)
 * for composite ids.
 */
private static Object prepareIdObjectValue(String[] columnNames, Object[] columnValues) {
    if ( columnNames.length == 1 ) {
        return columnValues[0];
    }
    Document compositeId = new Document();
    for ( int index = 0; index < columnNames.length; index++ ) {
        String name = columnNames[index];
        int separatorIndex = name.indexOf( PROPERTY_SEPARATOR );
        // Drop the qualifier before the first dot so only the embedded part of the name is used as key
        String fieldName = separatorIndex >= 0 ? name.substring( separatorIndex + 1 ) : name;
        compositeId.put( fieldName, columnValues[index] );
    }
    return compositeId;
}
/**
 * Returns the collection for the given table, configured with the read/write options of the
 * given context (read preference, read concern, write concern) when a context is provided.
 */
private MongoCollection<Document> getCollection(String table, OptionsContext context) {
    MongoCollection<Document> collection = currentDB.getCollection( table );
    if ( context != null ) {
        return withOptions( collection, context );
    }
    return collection;
}
/** Returns the collection of the given entity without applying any option. */
private MongoCollection<Document> getCollection(EntityKey key) {
    return getCollection( key.getTable(), null );
}
/** Returns the collection of the given entity, applying the options of the given context. */
private MongoCollection<Document> getCollection(EntityKey key, OptionsContext context) {
    return getCollection( key.getTable(), context );
}
/** Applies read preference, read concern and write concern from the options context, in that order. */
private MongoCollection<Document> withOptions(MongoCollection<Document> collection, OptionsContext context) {
    MongoCollection<Document> newCollection = collection;
    newCollection = withReadPreference( context, newCollection );
    newCollection = withReadConcern( context, newCollection );
    newCollection = withWriteConcern( context, newCollection );
    return newCollection;
}
/** Returns a view of the collection with the configured {@link ReadConcern}, if any. */
private MongoCollection<Document> withReadConcern(OptionsContext context, MongoCollection<Document> newCollection) {
    // Aligned with the other with* helpers: return directly instead of reassigning the parameter
    ReadConcern readConcern = context.getUnique( ReadConcernOption.class );
    if ( readConcern != null ) {
        return newCollection.withReadConcern( readConcern );
    }
    return newCollection;
}
/** Returns a view of the collection with the configured {@link ReadPreference}, if any. */
private MongoCollection<Document> withReadPreference(OptionsContext context, MongoCollection<Document> newCollection) {
    ReadPreference readPreference = context.getUnique( ReadPreferenceOption.class );
    if ( readPreference != null ) {
        return newCollection.withReadPreference( readPreference );
    }
    return newCollection;
}
/** Returns a view of the collection with the configured {@link WriteConcern}, if any. */
private MongoCollection<Document> withWriteConcern(OptionsContext context, MongoCollection<Document> newCollection) {
    WriteConcern writeConcern = context.getUnique( WriteConcernOption.class );
    if ( writeConcern != null ) {
        return newCollection.withWriteConcern( writeConcern );
    }
    return newCollection;
}
/**
 * Returns the collection in which the given association is stored: the single global association
 * collection, or a per-table collection prefixed with {@code ASSOCIATIONS_COLLECTION_PREFIX}.
 */
private MongoCollection getAssociationCollection(AssociationKey key, AssociationStorageStrategy storageStrategy, AssociationContext associationContext) {
    OptionsContext optionsContext = associationContext.getAssociationTypeContext().getOptionsContext();
    String collectionName = storageStrategy == AssociationStorageStrategy.GLOBAL_COLLECTION
            ? MongoDBConfiguration.DEFAULT_ASSOCIATION_STORE
            : ASSOCIATIONS_COLLECTION_PREFIX + key.getTable();
    return getCollection( collectionName, optionsContext );
}
// Returns the sub-document stored under the given update operator ("$set"/"$unset"),
// or a fresh empty one when that operator is not present in the statement yet.
private static Document getSubQuery(String operator, Document query) {
return query.get( operator ) != null ? (Document) query.get( operator ) : new Document();
}
// Adds column=value under the operator's sub-document and (re-)attaches it to the statement.
private static void addSubQuery(String operator, Document query, String column, Object value) {
Document subQuery = getSubQuery( operator, query );
query.append( operator, subQuery.append( column, value ) );
}
// Registers a $set for the column; any pending $unset on the same column is removed first
// so the statement does not contain conflicting operations for that path.
private static void addSetToQuery(Document query, String column, Object value) {
removeSubQuery( "$unset", query, column );
addSubQuery( "$set", query, column, value );
}
// Registers a $unset for the column, removing any pending $set on the same column first.
private static void addUnsetToQuery(Document query, String column) {
removeSubQuery( "$set", query, column );
addSubQuery( "$unset", query, column, Integer.valueOf( 1 ) );
}
// Removes the column from the operator's sub-document, dropping the operator entirely
// when its sub-document becomes empty.
private static void removeSubQuery(String operator, Document query, String column) {
Document subQuery = getSubQuery( operator, query );
subQuery.remove( column );
if ( subQuery.isEmpty() ) {
query.remove( operator );
}
}
// This dialect performs tuple writes through the batching SPI instead (this class implements
// BatchableGridDialect; see the executeBatch usage elsewhere in this file).
@Override
public void insertOrUpdateTuple(EntityKey key, TuplePointer tuplePointer, TupleContext tupleContext) {
throw new UnsupportedOperationException( "Method not supported in GridDialect anymore" );
}
@Override
//TODO deal with dotted column names once this method is used for ALL / Dirty optimistic locking
public boolean updateTupleWithOptimisticLock(EntityKey entityKey, Tuple oldLockState, Tuple tuple, TupleContext tupleContext) {
// The find criteria are the id plus the expected values of the version columns, so the
// update only applies when nobody changed them in the meantime
Document idObject = prepareIdObject( entityKey );
for ( String versionColumn : oldLockState.getColumnNames() ) {
idObject.put( versionColumn, oldLockState.get( versionColumn ) );
}
Document updater = objectForUpdate( tuple, tupleContext );
if ( updater.isEmpty() ) {
// NOTE(review): an empty updater is reported as a failed optimistic update — confirm against the SPI contract
return false;
}
Document doc = getCollection( entityKey ).findOneAndUpdate( idObject, updater );
// null means no document matched id+version, i.e. a concurrent modification happened
return doc != null;
}
@Override
public void insertTuple(EntityKeyMetadata entityKeyMetadata, Tuple tuple, TupleContext tupleContext) {
    // Insert the document, then copy the resulting _id back into the tuple's id column
    Document inserted = insertDocument( entityKeyMetadata, tuple, tupleContext );
    String idColumnName = entityKeyMetadata.getColumnNames()[0];
    tuple.put( idColumnName, inserted.get( ID_FIELDNAME ) );
}
/*
 * Inserts the tuple and returns a document containing the id in the field ID_FIELDNAME.
 */
private Document insertDocument(EntityKeyMetadata entityKeyMetadata, Tuple tuple, TupleContext tupleContext) {
    MongoDBTupleSnapshot snapshot = (MongoDBTupleSnapshot) tuple.getSnapshot();
    Document dbObject = objectForInsert( tuple, snapshot.getDbObject() );
    MongoCollection collection = getCollection( entityKeyMetadata.getTable(), tupleContext.getTupleTypeContext().getOptionsContext() );
    collection.insertOne( dbObject );
    return dbObject;
}
/**
 * Applies the tuple's pending operations onto the given document so it can be passed to the
 * MongoDB batch insert function; id/key columns are skipped as they are already in the document.
 */
private static Document objectForInsert(Tuple tuple, Document dbObject) {
    MongoDBTupleSnapshot snapshot = (MongoDBTupleSnapshot) tuple.getSnapshot();
    for ( TupleOperation operation : tuple.getOperations() ) {
        String column = operation.getColumn();
        if ( !notInIdField( snapshot, column ) ) {
            continue;
        }
        switch ( operation.getType() ) {
            case PUT:
                MongoHelpers.setValue( dbObject, column, operation.getValue() );
                break;
            case PUT_NULL:
            case REMOVE:
                // Null writes and removals both translate to dropping the field from the document
                MongoHelpers.resetValue( dbObject, column );
                break;
        }
    }
    return dbObject;
}
// Convenience overload starting from an empty update statement.
private static Document objectForUpdate(Tuple tuple, TupleContext tupleContext) {
    Document emptyStatement = new Document();
    return objectForUpdate( tuple, tupleContext, emptyStatement );
}
/**
 * Translates the tuple's pending operations into a MongoDB update statement made of {@code $set}
 * and {@code $unset} sub-documents. When a removed column belongs to an embeddable whose state is
 * entirely null, the whole embeddable is unset once instead of each of its columns individually.
 */
private static Document objectForUpdate(Tuple tuple, TupleContext tupleContext, Document updateStatement) {
    MongoDBTupleSnapshot snapshot = (MongoDBTupleSnapshot) tuple.getSnapshot();
    EmbeddableStateFinder embeddableStateFinder = new EmbeddableStateFinder( tuple, tupleContext );
    Set<String> nullEmbeddables = new HashSet<>();
    for ( TupleOperation operation : tuple.getOperations() ) {
        String column = operation.getColumn();
        if ( notInIdField( snapshot, column ) ) {
            switch ( operation.getType() ) {
                case PUT:
                    addSetToQuery( updateStatement, column, operation.getValue() );
                    break;
                case PUT_NULL:
                case REMOVE:
                    // try and find if this column is within an embeddable and if that embeddable is null
                    // if true, unset the full embeddable
                    String nullEmbeddable = embeddableStateFinder.getOuterMostNullEmbeddableIfAny( column );
                    if ( nullEmbeddable != null ) {
                        // we have a null embeddable; Set.add returns false when it was already processed
                        if ( nullEmbeddables.add( nullEmbeddable ) ) {
                            addUnsetToQuery( updateStatement, nullEmbeddable );
                        }
                    }
                    else {
                        // simply unset the column
                        addUnsetToQuery( updateStatement, column );
                    }
                    break;
            }
        }
    }
    return updateStatement;
}
// Returns true when the column is not part of the document id: neither the _id field itself,
// nor a dotted path ending in "._id", nor one of the snapshot's key columns.
private static boolean notInIdField(MongoDBTupleSnapshot snapshot, String column) {
return !column.equals( ID_FIELDNAME ) && !column.endsWith( PROPERTY_SEPARATOR + ID_FIELDNAME ) && !snapshot.isKeyColumn( column );
}
@Override
public void removeTuple(EntityKey key, TupleContext tupleContext) {
    Document idCriteria = prepareIdObject( key );
    WriteConcern writeConcern = getWriteConcern( tupleContext );
    MongoCollection collection = getCollection( key ).withWriteConcern( writeConcern );
    // findOneAndDelete returns the removed document, which is needed to clean up GridFS-stored content
    Document removed = collection.findOneAndDelete( idCriteria );
    if ( removed != null ) {
        provider.getBinaryStorageManager().removeEntityFromBinaryStorage( removed, key.getMetadata() );
    }
}
@Override
public boolean removeTupleWithOptimisticLock(EntityKey entityKey, Tuple oldLockState, TupleContext tupleContext) {
    // Delete criteria: the id plus the expected values of the version columns
    Document criteria = prepareIdObject( entityKey );
    for ( String versionColumn : oldLockState.getColumnNames() ) {
        criteria.put( versionColumn, oldLockState.get( versionColumn ) );
    }
    MongoCollection collection = getCollection( entityKey );
    // No match (null) means the document changed concurrently or is already gone
    return collection.findOneAndDelete( criteria ) != null;
}
//not for embedded
/**
 * Loads the association document for the given key from its dedicated (global or per-table)
 * collection, projected down to the rows field. Returns {@code null} when it does not exist.
 */
private Document findAssociation(AssociationKey key, AssociationContext associationContext, AssociationStorageStrategy storageStrategy) {
    final Document associationKeyObject = associationKeyToObject( key, storageStrategy );
    // Restored the Document type argument: with a raw FindIterable, first() returns Object
    MongoCollection<Document> associationCollection = getAssociationCollection( key, storageStrategy, associationContext );
    FindIterable<Document> fi = associationCollection.find( associationKeyObject );
    return fi != null ? fi.projection( getProjection( key, false ) ).first() : null;
}
/**
 * Projection used when loading an association: the collection role field when the association is
 * embedded in the owning entity, otherwise the rows field of the association document.
 */
private static Document getProjection(AssociationKey key, boolean embedded) {
    List<String> fields = embedded
            ? Collections.singletonList( key.getMetadata().getCollectionRole() )
            : ROWS_FIELDNAME_LIST;
    return getProjection( fields );
}
/** Whether an insert for the given entity key is pending in the association context's operations queue. */
private static boolean isInTheInsertionQueue(EntityKey key, AssociationContext associationContext) {
    OperationsQueue queue = associationContext.getOperationsQueue();
    if ( queue == null ) {
        return false;
    }
    return queue.isInTheInsertionQueue( key );
}
@Override
public Association getAssociation(AssociationKey key, AssociationContext associationContext) {
AssociationStorageStrategy storageStrategy = getAssociationStorageStrategy( key, associationContext );
// Embedded association whose owner is still in the insertion queue: the owner document does not
// exist in the db yet, so build the snapshot from the owner's id only
if ( isEmbeddedAssociation( key ) && isInTheInsertionQueue( key.getEntityKey(), associationContext ) ) {
// The association is embedded and the owner of the association is in the insertion queue
Document idObject = prepareIdObject( key.getEntityKey() );
return new Association( new MongoDBAssociationSnapshot( idObject, key, storageStrategy ) );
}
// We need to execute the previous operations first or it won't be able to find the key that should have
// been created
executeBatch( associationContext.getOperationsQueue() );
// IN_ENTITY storage: the association rows live inside the owning entity document
if ( storageStrategy == AssociationStorageStrategy.IN_ENTITY ) {
Document entity = getEmbeddingEntity( key, associationContext );
if ( entity != null && hasField( entity, key.getMetadata().getCollectionRole() ) ) {
return new Association( new MongoDBAssociationSnapshot( entity, key, storageStrategy ) );
}
else {
return null;
}
}
// Otherwise the association lives in a dedicated (global or per-table) association document
final Document result = findAssociation( key, associationContext, storageStrategy );
if ( result == null ) {
return null;
}
else {
return new Association( new MongoDBAssociationSnapshot( result, key, storageStrategy ) );
}
}
/** Whether the given association is an embedded collection (stored within the owning entity). */
private static boolean isEmbeddedAssociation(AssociationKey key) {
    AssociationKind kind = key.getMetadata().getAssociationKind();
    return kind == AssociationKind.EMBEDDED_COLLECTION;
}
@Override
public Association createAssociation(AssociationKey key, AssociationContext associationContext) {
AssociationStorageStrategy storageStrategy = getAssociationStorageStrategy( key, associationContext );
// For IN_ENTITY storage the snapshot is based on the owning entity document; otherwise a new
// association-document skeleton carrying the association key is used
Document document = storageStrategy == AssociationStorageStrategy.IN_ENTITY
? getEmbeddingEntity( key, associationContext )
: associationKeyToObject( key, storageStrategy );
Association association = new Association( new MongoDBAssociationSnapshot( document, key, storageStrategy ) );
// in the case of an association stored in the entity structure, we might end up with rows present in the
// current snapshot of the entity while we want an empty association here. So, in this case, we clear the
// snapshot to be sure the association created is empty.
if ( !association.isEmpty() ) {
association.clear();
}
return association;
}
/**
 * Returns the rows of the given association as to be stored in the database. The return value is one of the
 * following:
 * <ul>
 * <li>A list of plain values such as {@code String}s, {@code int}s etc. in case there is exactly one row key column
 * which is not part of the association key (in this case we don't need to persist the key name as it can be
 * restored from the association key upon loading) or</li>
 * <li>A list of {@code Document}s with keys/values for all row key columns which are not part of the association
 * key</li>
 * <li>A {@link Document} with a key for each entry in case the given association has exactly one row key column
 * which is of type {@code String} (e.g. a hash map) and {@link DocumentStoreProperties#MAP_STORAGE} is not set to
 * {@link MapStorageType#AS_LIST}. The map values will either be plain values (in case it's single values) or
 * another {@code Document}.</li>
 * </ul>
 */
private static Object getAssociationRows(Association association, AssociationKey key, AssociationContext associationContext) {
boolean organizeByRowKey = DotPatternMapHelpers.organizeAssociationMapByRowKey( association, key, associationContext );
// transform map entries such as ( addressType='home', address_id=123) into the more
// natural ( { 'home'=123 }
if ( organizeByRowKey ) {
String rowKeyColumn = organizeByRowKey ? key.getMetadata().getRowKeyIndexColumnNames()[0] : null;
Document rows = new Document();
for ( RowKey rowKey : association.getKeys() ) {
Document row = (Document) getAssociationRow( association.get( rowKey ), key );
String rowKeyValue = (String) row.remove( rowKeyColumn );
// if there is a single column on the value side left, unwrap it
if ( row.keySet().size() == 1 ) {
rows.put( rowKeyValue, DocumentUtil.toMap( row ).values().iterator().next() );
}
else {
rows.put( rowKeyValue, row );
}
}
return rows;
}
// non-map rows can be taken as is
else {
List