/*
* Copyright (c) 1998, 2021 Oracle, IBM Corporation, and/or its affiliates. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0,
* or the Eclipse Distribution License v. 1.0 which is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
*/
// Contributors:
// Oracle - initial API and implementation from Oracle TopLink
// 12/30/2010-2.3 Guy Pelletier
// - 312253: Descriptor exception with Embeddable on DDL gen
// 07/27/2012-2.5 Chris Delahunt
// - 371950: Metadata caching
// 10/25/2012-2.5 Guy Pelletier
// - 374688: JPA 2.1 Converter support
// 02/11/2013-2.5 Guy Pelletier
// - 365931: @JoinColumn(name="FK_DEPT",insertable = false, updatable = true) causes INSERT statement to include this data value that it is associated with
// 02/14/2018-2.7.2 Lukas Jungmann
// - 530680: embedded element collection within an entity of protected isolation does not merged changes into clones correctly
// 03/14/2018-2.7 Will Dazey
// - 500753: Synchronize initialization of InsertQuery
package org.eclipse.persistence.mappings;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import org.eclipse.persistence.descriptors.ClassDescriptor;
import org.eclipse.persistence.descriptors.DescriptorEvent;
import org.eclipse.persistence.descriptors.DescriptorEventManager;
import org.eclipse.persistence.descriptors.changetracking.AttributeChangeTrackingPolicy;
import org.eclipse.persistence.descriptors.changetracking.DeferredChangeDetectionPolicy;
import org.eclipse.persistence.descriptors.changetracking.ObjectChangeTrackingPolicy;
import org.eclipse.persistence.exceptions.ConversionException;
import org.eclipse.persistence.exceptions.DatabaseException;
import org.eclipse.persistence.exceptions.DescriptorException;
import org.eclipse.persistence.exceptions.OptimisticLockException;
import org.eclipse.persistence.exceptions.QueryException;
import org.eclipse.persistence.expressions.Expression;
import org.eclipse.persistence.expressions.ExpressionBuilder;
import org.eclipse.persistence.expressions.ExpressionMath;
import org.eclipse.persistence.indirection.IndirectList;
import org.eclipse.persistence.indirection.ValueHolder;
import org.eclipse.persistence.internal.descriptors.DescriptorIterator;
import org.eclipse.persistence.internal.descriptors.ObjectBuilder;
import org.eclipse.persistence.internal.expressions.SQLUpdateStatement;
import org.eclipse.persistence.internal.helper.ConversionManager;
import org.eclipse.persistence.internal.helper.DatabaseField;
import org.eclipse.persistence.internal.helper.DatabaseTable;
import org.eclipse.persistence.internal.helper.IdentityHashSet;
import org.eclipse.persistence.internal.helper.NonSynchronizedVector;
import org.eclipse.persistence.internal.identitymaps.CacheId;
import org.eclipse.persistence.internal.identitymaps.CacheKey;
import org.eclipse.persistence.internal.mappings.converters.AttributeNameTokenizer.TokensIterator;
import org.eclipse.persistence.internal.queries.AttributeItem;
import org.eclipse.persistence.internal.queries.ContainerPolicy;
import org.eclipse.persistence.internal.queries.JoinedAttributeManager;
import org.eclipse.persistence.internal.sessions.AbstractRecord;
import org.eclipse.persistence.internal.sessions.AbstractSession;
import org.eclipse.persistence.internal.sessions.AggregateCollectionChangeRecord;
import org.eclipse.persistence.internal.sessions.ChangeRecord;
import org.eclipse.persistence.internal.sessions.MergeManager;
import org.eclipse.persistence.internal.sessions.ObjectChangeSet;
import org.eclipse.persistence.internal.sessions.UnitOfWorkChangeSet;
import org.eclipse.persistence.internal.sessions.UnitOfWorkImpl;
import org.eclipse.persistence.mappings.converters.Converter;
import org.eclipse.persistence.mappings.foundation.MapComponentMapping;
import org.eclipse.persistence.queries.DataModifyQuery;
import org.eclipse.persistence.queries.DatabaseQuery;
import org.eclipse.persistence.queries.DeleteAllQuery;
import org.eclipse.persistence.queries.DeleteObjectQuery;
import org.eclipse.persistence.queries.InsertObjectQuery;
import org.eclipse.persistence.queries.ModifyQuery;
import org.eclipse.persistence.queries.ObjectBuildingQuery;
import org.eclipse.persistence.queries.ObjectLevelModifyQuery;
import org.eclipse.persistence.queries.ObjectLevelReadQuery;
import org.eclipse.persistence.queries.QueryByExamplePolicy;
import org.eclipse.persistence.queries.ReadAllQuery;
import org.eclipse.persistence.queries.ReadQuery;
import org.eclipse.persistence.queries.UpdateObjectQuery;
import org.eclipse.persistence.queries.WriteObjectQuery;
import org.eclipse.persistence.sessions.CopyGroup;
import org.eclipse.persistence.sessions.DatabaseRecord;
import org.eclipse.persistence.sessions.Project;
import org.eclipse.persistence.sessions.remote.DistributedSession;
/**
 * <p><b>Purpose</b>: The aggregate collection mapping is used to represent the aggregate relationship between a single
 * source object and a collection of target objects. The target objects cannot exist without the existence of the
 * source object (privately owned).
 * Unlike the normal aggregate mapping, there is a target table being mapped from the target objects.
 * Unlike normal 1:m mapping, there is no 1:1 back reference mapping, as foreign key constraints have been resolved by the aggregation.
 *
 * @author King (Yaoping) Wang
 * @since TOPLink/Java 3.0
 */
public class AggregateCollectionMapping extends CollectionMapping implements RelationalMapping, MapComponentMapping, EmbeddableMapping {
/** This is a key in the target table which is a foreign key in the target table. */
protected Vector targetForeignKeyFields;
/** This is a primary key in the source table that is used as foreign key in the target table */
protected Vector sourceKeyFields;
/** Foreign keys in the target table to the related keys in the source table */
protected Map targetForeignKeyToSourceKeys;
/** Map the name of a field in the aggregate collection descriptor to a field in the actual table specified in the mapping. */
protected Map aggregateToSourceFields;
/** Map the name of an attribute of the reference descriptor mapped with AggregateCollectionMapping to aggregateToSourceFieldNames
* that should be applied to this mapping.
*/
protected Map> nestedAggregateToSourceFields;
/**
* List of converters to apply at initialize time to their cloned aggregate mappings.
*/
protected Map converters;
/** In RemoteSession case the mapping needs the reference descriptor serialized from the server,
* but referenceDescriptor attribute defined as transient in the superclass. To overcome that
* in non-remote case referenceDescriptor is assigned to remoteReferenceDescriptor; in remote - another way around.
*/
protected ClassDescriptor remoteReferenceDescriptor;
/** Default source table that should be used with the default source fields of this mapping. */
protected DatabaseTable defaultSourceTable;
/** Indicates whether the entire target object is primary key - in that case the object can't be updated in the db,
* but rather deleted and then re-inserted.
*/
protected boolean isEntireObjectPK;
/** These queries used to update listOrderField
*/
protected transient DataModifyQuery updateListOrderFieldQuery;
protected transient DataModifyQuery bulkUpdateListOrderFieldQuery;
protected transient DataModifyQuery pkUpdateListOrderFieldQuery;
/** indicates whether listOrderField value could be updated in the db. Used only if listOrderField!=null */
protected boolean isListOrderFieldUpdatable;
protected static final String min = "min";
protected static final String max = "max";
protected static final String shift = "shift";
protected static final String pk = "pk";
protected static final String bulk = "bulk";
/**
* Indicates whether the mapping (or at least one of its nested mappings, at any nested depth)
* references an entity.
* To return true the mapping (or nested mapping) should be ForeignReferenceMapping with non-null and non-aggregate reference descriptor.
* Lazily initialized.
*/
protected Boolean hasNestedIdentityReference;
/**
* PUBLIC:
* Default constructor.
*/
public AggregateCollectionMapping() {
this.aggregateToSourceFields = new HashMap(5);
this.nestedAggregateToSourceFields = new HashMap>(5);
this.converters = new HashMap();
this.targetForeignKeyToSourceKeys = new HashMap(5);
this.sourceKeyFields = org.eclipse.persistence.internal.helper.NonSynchronizedVector.newInstance(1);
this.targetForeignKeyFields = org.eclipse.persistence.internal.helper.NonSynchronizedVector.newInstance(1);
this.deleteAllQuery = new DeleteAllQuery();
//aggregates should always cascade all operations
this.setCascadeAll(true);
this.isListOrderFieldSupported = true;
this.isListOrderFieldUpdatable = true;
this.isPrivateOwned = true;
}
/**
* INTERNAL:
*/
@Override
public boolean isRelationalMapping() {
return true;
}
    /**
     * INTERNAL:
     * In JPA users may specify overrides to apply to a many to many mapping
     * on a shared embeddable descriptor. These settings are applied at
     * initialize time, after the reference descriptor is cloned. In an
     * aggregate collection case, this is not supported and currently silently
     * ignored and does nothing.
     */
    @Override
    public void addOverrideManyToManyMapping(ManyToManyMapping mapping) {
        // Intentionally a no-op: many-to-many overrides are not supported
        // for aggregate collection mappings.
        // Not supported at this time ...
    }
    /**
     * INTERNAL:
     * In JPA users may specify overrides to apply to a unidirectional one to
     * many mapping on a shared embeddable descriptor. These settings are
     * applied at initialize time, after the reference descriptor is cloned. In
     * an aggregate collection case, this is not supported and currently
     * silently ignored and does nothing.
     */
    @Override
    public void addOverrideUnidirectionalOneToManyMapping(UnidirectionalOneToManyMapping mapping) {
        // Intentionally a no-op: unidirectional 1:M overrides are not supported
        // for aggregate collection mappings.
        // Not supported at this time ...
    }
/**
* Add a converter to be applied to a mapping of the aggregate descriptor.
*/
@Override
public void addConverter(Converter converter, String attributeName) {
converters.put(attributeName, converter);
}
/**
* PUBLIC:
* Maps a field name in the aggregate descriptor
* to a field name in the source table.
*/
public void addFieldNameTranslation(String sourceFieldName, String aggregateFieldName) {
addFieldTranslation(new DatabaseField(sourceFieldName), aggregateFieldName);
}
/**
* PUBLIC:
* Maps a field name in the aggregate descriptor
* to a field in the source table.
*/
public void addFieldTranslation(DatabaseField sourceField, String aggregateField) {
aggregateToSourceFields.put(aggregateField, sourceField);
}
/**
* PUBLIC:
*
* Maps a field name in the aggregate descriptor
* to a field name in the source table.
*/
public void addFieldTranslations(Map map) {
aggregateToSourceFields.putAll(map);
}
/**
* PUBLIC:
* Map the name of an attribute of the reference descriptor mapped with AggregateCollectionMapping to aggregateToSourceFieldNames
* that should be applied to this mapping.
*/
public void addNestedFieldNameTranslation(String attributeName, String sourceFieldName, String aggregateFieldName) {
addNestedFieldTranslation(attributeName, new DatabaseField(sourceFieldName), aggregateFieldName);
}
/**
* PUBLIC:
* Map the name of an attribute of the reference descriptor mapped with AggregateCollectionMapping to aggregateToSourceFieldNames
* that should be applied to this mapping.
*/
public void addNestedFieldTranslation(String attributeName, DatabaseField sourceField, String aggregateFieldName) {
Map attributeFieldNameTranslation = nestedAggregateToSourceFields.get(attributeName);
if (attributeFieldNameTranslation == null) {
attributeFieldNameTranslation = new HashMap(5);
nestedAggregateToSourceFields.put(attributeName, attributeFieldNameTranslation);
}
attributeFieldNameTranslation.put(aggregateFieldName, sourceField);
}
/**
* PUBLIC:
* Map the name of an attribute of the reference descriptor mapped with AggregateCollectionMapping to aggregateToSourceFields
* that should be applied to this mapping.
*/
public void addNestedFieldNameTranslations(String attributeName, Map map) {
Map attributeFieldNameTranslation = nestedAggregateToSourceFields.get(attributeName);
if (attributeFieldNameTranslation == null) {
nestedAggregateToSourceFields.put(attributeName, map);
} else {
attributeFieldNameTranslation.putAll(map);
}
}
/**
* PUBLIC:
* Define the target foreign key relationship in the 1-M aggregate collection mapping.
* Both the target foreign key field and the source primary key field must be specified.
*/
@Override
public void addTargetForeignKeyField(DatabaseField targetForeignKey, DatabaseField sourceKey) {
getTargetForeignKeyFields().addElement(targetForeignKey);
getSourceKeyFields().addElement(sourceKey);
}
/**
* PUBLIC:
* Define the target foreign key relationship in the 1-M aggregate collection mapping.
* Both the target foreign key field name and the source primary key field name must be specified.
*/
public void addTargetForeignKeyFieldName(String targetForeignKey, String sourceKey) {
addTargetForeignKeyField(new DatabaseField(targetForeignKey), new DatabaseField(sourceKey));
}
/**
* INTERNAL:
* Used during building the backup shallow copy to copy the vector without re-registering the target objects.
*/
@Override
public Object buildBackupCloneForPartObject(Object attributeValue, Object clone, Object backup, UnitOfWorkImpl unitOfWork) {
ContainerPolicy containerPolicy = getContainerPolicy();
if (attributeValue == null) {
return containerPolicy.containerInstance(1);
}
Object clonedAttributeValue = containerPolicy.containerInstance(containerPolicy.sizeFor(attributeValue));
if (isSynchronizeOnMerge) {
synchronized (attributeValue) {
for (Object valuesIterator = containerPolicy.iteratorFor(attributeValue);
containerPolicy.hasNext(valuesIterator);) {
Object wrappedElement = containerPolicy.nextEntry(valuesIterator, unitOfWork);
Object cloneValue = buildElementBackupClone(containerPolicy.unwrapIteratorResult(wrappedElement), unitOfWork);
containerPolicy.addInto(containerPolicy.keyFromIterator(valuesIterator), cloneValue, clonedAttributeValue, unitOfWork);
}
}
} else {
for (Object valuesIterator = containerPolicy.iteratorFor(attributeValue);
containerPolicy.hasNext(valuesIterator);) {
Object wrappedElement = containerPolicy.nextEntry(valuesIterator, unitOfWork);
Object cloneValue = buildElementBackupClone(containerPolicy.unwrapIteratorResult(wrappedElement), unitOfWork);
containerPolicy.addInto(containerPolicy.keyFromIterator(valuesIterator), cloneValue, clonedAttributeValue, unitOfWork);
}
}
return clonedAttributeValue;
}
    /**
     * INTERNAL:
     * Require for cloning, the part must be cloned.
     * Ignore the objects, use the attribute value.
     * this is identical to the super class except that the element must be added to the new
     * aggregates collection so that the referenced objects will be cloned correctly
     */
    @Override
    public Object buildCloneForPartObject(Object attributeValue, Object original, CacheKey cacheKey, Object clone, AbstractSession cloningSession, Integer refreshCascade, boolean isExisting, boolean isFromSharedCache) {
        ContainerPolicy containerPolicy = getContainerPolicy();
        if (attributeValue == null) {
            // No source collection: return a fresh empty container.
            return containerPolicy.containerInstance(1);
        }
        Object clonedAttributeValue = containerPolicy.containerInstance(containerPolicy.sizeFor(attributeValue));
        Object temporaryCollection = null;
        if (isSynchronizeOnMerge) {
            // I need to synchronize here to prevent the collection from changing while I am cloning it.
            // This will occur when I am merging into the cache and I am instantiating a UOW valueHolder at the same time
            // I can not synchronize around the clone, as this will cause deadlocks, so I will need to copy the collection then create the clones
            // I will use a temporary collection to help speed up the process
            synchronized (attributeValue) {
                temporaryCollection = containerPolicy.cloneFor(attributeValue);
            }
        } else {
            // No synchronization requested: iterate the live collection directly.
            temporaryCollection = attributeValue;
        }
        for (Object valuesIterator = containerPolicy.iteratorFor(temporaryCollection);
                 containerPolicy.hasNext(valuesIterator);) {
            Object wrappedElement = containerPolicy.nextEntry(valuesIterator, cloningSession);
            Object originalElement = containerPolicy.unwrapIteratorResult(wrappedElement);
            //need to add to aggregate list in the case that there are related objects.
            if (cloningSession.isUnitOfWork() && ((UnitOfWorkImpl)cloningSession).isOriginalNewObject(original)) {
                ((UnitOfWorkImpl)cloningSession).addNewAggregate(originalElement);
            }
            // Clone both the element and (for map policies) its key.
            Object cloneValue = buildElementClone(originalElement, clone, cacheKey, refreshCascade, cloningSession, isExisting, isFromSharedCache);
            Object clonedKey = containerPolicy.buildCloneForKey(containerPolicy.keyFromIterator(valuesIterator), clone, cacheKey, refreshCascade, cloningSession, isExisting, isFromSharedCache);
            containerPolicy.addInto(clonedKey, cloneValue, clonedAttributeValue, cloningSession);
        }
        if(temporaryCollection instanceof IndirectList) {
            // Carry over the broken-list-order flag so the clone reports the same DB order state.
            ((IndirectList)clonedAttributeValue).setIsListOrderBrokenInDb(((IndirectList)temporaryCollection).isListOrderBrokenInDb());
        }
        return clonedAttributeValue;
    }
/**
* INTERNAL:
* Clone the aggregate collection, if necessary.
*/
protected Object buildElementBackupClone(Object element, UnitOfWorkImpl unitOfWork) {
// Do not clone for read-only.
if (unitOfWork.isClassReadOnly(element.getClass(), getReferenceDescriptor())) {
return element;
}
ClassDescriptor aggregateDescriptor = getReferenceDescriptor(element.getClass(), unitOfWork);
Object clonedElement = aggregateDescriptor.getObjectBuilder().buildBackupClone(element, unitOfWork);
return clonedElement;
}
/**
* INTERNAL:
* Clone the aggregate collection, if necessary.
*/
@Override
public Object buildElementClone(Object element, Object parent, CacheKey parentCacheKey, Integer refreshCascade, AbstractSession cloningSession, boolean isExisting, boolean isFromSharedCache) {
// Do not clone for read-only.
if (cloningSession.isUnitOfWork() && cloningSession.isClassReadOnly(element.getClass(), getReferenceDescriptor())) {
return element;
}
ClassDescriptor aggregateDescriptor = getReferenceDescriptor(element.getClass(), cloningSession);
// bug 2612602 as we are building the working copy make sure that we call to correct clone method.
Object clonedElement = aggregateDescriptor.getObjectBuilder().instantiateWorkingCopyClone(element, cloningSession);
aggregateDescriptor.getObjectBuilder().populateAttributesForClone(element, parentCacheKey, clonedElement, refreshCascade, cloningSession);
if (cloningSession.isUnitOfWork()){
// CR 4155 add the originals to the UnitOfWork so that we can find it later in the merge
// as aggregates have no identity. If we don't do this we will loose indirection information.
((UnitOfWorkImpl)cloningSession).getCloneToOriginals().put(clonedElement, element);
}
return clonedElement;
}
/**
* INTERNAL:
* In case Query By Example is used, this method builds and returns an expression that
* corresponds to a single attribute and it's value.
*/
public Expression buildExpression(Object queryObject, QueryByExamplePolicy policy, Expression expressionBuilder, Map processedObjects, AbstractSession session) {
if (policy.shouldValidateExample()){
throw QueryException.unsupportedMappingQueryByExample(queryObject.getClass().getName(), this);
}
return null;
}
/**
* INTERNAL:
* This method is used to store the FK fields that can be cached that correspond to noncacheable mappings
* the FK field values will be used to re-issue the query when cloning the shared cache entity
*/
@Override
public void collectQueryParameters(Set cacheFields){
for (DatabaseField field : getSourceKeyFields()) {
cacheFields.add(field);
}
}
/**
* INTERNAL:
* Convert all the class-name-based settings in this mapping to actual
* class-based settings. This method is used when converting a project that
* has been built with class names to a project with classes.
* @param classLoader Where to search for classes.
*/
@Override
public void convertClassNamesToClasses(ClassLoader classLoader) {
super.convertClassNamesToClasses(classLoader);
for (Converter converter : converters.values()) {
// Convert and any Converter class names.
convertConverterClassNamesToClasses(converter, classLoader);
}
}
    /**
     * INTERNAL:
     * Cascade discover and persist new objects during commit.
     * Aggregate elements themselves are not registered, but the cascade must be
     * propagated through each element's mappings and through the container policy
     * (for map keys / wrappers).
     */
    @Override
    public void cascadeDiscoverAndPersistUnregisteredNewObjects(Object object, Map newObjects, Map unregisteredExistingObjects, Map visitedObjects, UnitOfWorkImpl uow, Set cascadeErrors) {
        //aggregate objects are not registered but their mappings should be.
        Object cloneAttribute = null;
        cloneAttribute = getAttributeValueFromObject(object);
        if ((cloneAttribute == null) || (!getIndirectionPolicy().objectIsInstantiated(cloneAttribute))) {
            // Uninstantiated indirection means nothing was touched; do not trigger it here.
            return;
        }
        ObjectBuilder builder = null;
        ContainerPolicy cp = getContainerPolicy();
        Object cloneObjectCollection = null;
        cloneObjectCollection = getRealCollectionAttributeValueFromObject(object, uow);
        Object cloneIter = cp.iteratorFor(cloneObjectCollection);
        while (cp.hasNext(cloneIter)) {
            Object wrappedObject = cp.nextEntry(cloneIter, uow);
            Object nextObject = cp.unwrapIteratorResult(wrappedObject);
            if (nextObject != null) {
                // Cascade into the element's own mappings, then into the wrapper/key.
                builder = getReferenceDescriptor(nextObject.getClass(), uow).getObjectBuilder();
                builder.cascadeDiscoverAndPersistUnregisteredNewObjects(nextObject, newObjects, unregisteredExistingObjects, visitedObjects, uow, cascadeErrors);
                cp.cascadeDiscoverAndPersistUnregisteredNewObjects(wrappedObject, newObjects, unregisteredExistingObjects, visitedObjects, uow, cascadeErrors);
            }
        }
    }
    /**
     * INTERNAL:
     * Cascade registerNew for Create through mappings that require the cascade.
     * Aggregate elements are not registered themselves; the cascade is propagated
     * through each element's mappings and the container policy.
     */
    @Override
    public void cascadeRegisterNewIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects) {
        // Aggregate objects are not registered but their mappings should be.
        Object attributeValue = getAttributeValueFromObject(object);
        if ((attributeValue == null)
                // Also check if the source is new, then must always cascade.
                || (!this.indirectionPolicy.objectIsInstantiated(attributeValue) && !uow.isCloneNewObject(object))) {
            return;
        }
        ObjectBuilder builder = null;
        ContainerPolicy cp = this.containerPolicy;
        Object cloneObjectCollection = null;
        cloneObjectCollection = getRealCollectionAttributeValueFromObject(object, uow);
        Object cloneIter = cp.iteratorFor(cloneObjectCollection);
        while (cp.hasNext(cloneIter)) {
            Object wrappedObject = cp.nextEntry(cloneIter, uow);
            Object nextObject = cp.unwrapIteratorResult(wrappedObject);
            // visitedObjects guards against revisiting shared/cyclic references.
            if (nextObject != null && (! visitedObjects.containsKey(nextObject))){
                visitedObjects.put(nextObject, nextObject);
                builder = getReferenceDescriptor(nextObject.getClass(), uow).getObjectBuilder();
                builder.cascadeRegisterNewForCreate(nextObject, uow, visitedObjects);
                cp.cascadeRegisterNewIfRequired(wrappedObject, uow, visitedObjects);
            }
        }
    }
    /**
     * INTERNAL:
     * Cascade the remove operation through mappings that require the cascade.
     * (The previous Javadoc said "Cascade registerNew for Create" -- a copy/paste
     * error; this method cascades removal, see cascadePerformRemove below.)
     */
    @Override
    public void cascadePerformRemoveIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects){
        //aggregate objects are not registered but their mappings should be.
        Object cloneAttribute = getAttributeValueFromObject(object);
        if ((cloneAttribute == null)) {
            return;
        }
        // PERF: If not instantiated, then avoid instantiating, delete-all will handle deletion.
        if (usesIndirection() && (!mustDeleteReferenceObjectsOneByOne())) {
            if (!this.indirectionPolicy.objectIsInstantiated(cloneAttribute)) {
                return;
            }
        }
        ObjectBuilder builder = null;
        ContainerPolicy cp = getContainerPolicy();
        Object cloneObjectCollection = null;
        cloneObjectCollection = getRealCollectionAttributeValueFromObject(object, uow);
        Object cloneIter = cp.iteratorFor(cloneObjectCollection);
        while (cp.hasNext(cloneIter)) {
            Object wrappedObject = cp.nextEntry(cloneIter, uow);
            Object nextObject = cp.unwrapIteratorResult(wrappedObject);
            // visitedObjects guards against revisiting shared/cyclic references.
            if (nextObject != null && ( ! visitedObjects.containsKey(nextObject) ) ){
                visitedObjects.put(nextObject, nextObject);
                if (this.isCascadeOnDeleteSetOnDatabase) {
                    // DB-level ON DELETE CASCADE handles the row; just record the object.
                    uow.getCascadeDeleteObjects().add(nextObject);
                }
                builder = getReferenceDescriptor(nextObject.getClass(), uow).getObjectBuilder();
                builder.cascadePerformRemove(nextObject, uow, visitedObjects);
                cp.cascadePerformRemoveIfRequired(wrappedObject, uow, visitedObjects);
            }
        }
    }
    /**
     * INTERNAL:
     * Cascade perform removal of orphaned private owned objects from the UnitOfWorkChangeSet.
     */
    @Override
    public void cascadePerformRemovePrivateOwnedObjectFromChangeSetIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects) {
        // if the object is not instantiated, do not instantiate or cascade
        Object attributeValue = getAttributeValueFromObject(object);
        if (attributeValue != null && getIndirectionPolicy().objectIsInstantiated(attributeValue)) {
            Object cloneObjectCollection = getRealCollectionAttributeValueFromObject(object, uow);
            ContainerPolicy cp = getContainerPolicy();
            for (Object cloneIter = cp.iteratorFor(cloneObjectCollection); cp.hasNext(cloneIter);) {
                Object referencedObject = cp.next(cloneIter, uow);
                // visitedObjects guards against revisiting shared/cyclic references.
                if (referencedObject != null && !visitedObjects.containsKey(referencedObject)) {
                    visitedObjects.put(referencedObject, referencedObject);
                    ObjectBuilder builder = getReferenceDescriptor(referencedObject.getClass(), uow).getObjectBuilder();
                    builder.cascadePerformRemovePrivateOwnedObjectFromChangeSet(referencedObject, uow, visitedObjects);
                }
            }
        }
    }
    /**
     * INTERNAL:
     * The mapping clones itself to create deep copy.
     * The key/field collections are copied so the clone is independent of this
     * mapping's subsequent mutations; the list-order queries are shared (shallow).
     */
    @Override
    public Object clone() {
        AggregateCollectionMapping mappingObject = (AggregateCollectionMapping)super.clone();
        // Deep-copy the mutable collections that super.clone() only shallow-copies.
        mappingObject.setTargetForeignKeyToSourceKeys(new HashMap(getTargetForeignKeyToSourceKeys()));
        mappingObject.setSourceKeyFields(org.eclipse.persistence.internal.helper.NonSynchronizedVector.newInstance(getSourceKeyFields()));
        mappingObject.setTargetForeignKeyFields(org.eclipse.persistence.internal.helper.NonSynchronizedVector.newInstance(getTargetForeignKeyFields()));
        mappingObject.aggregateToSourceFields = new HashMap(this.aggregateToSourceFields);
        mappingObject.nestedAggregateToSourceFields = new HashMap(this.nestedAggregateToSourceFields);
        // Transient query references are shared with the original when present.
        if(updateListOrderFieldQuery != null) {
            mappingObject.updateListOrderFieldQuery = this.updateListOrderFieldQuery;
        }
        if(bulkUpdateListOrderFieldQuery != null) {
            mappingObject.bulkUpdateListOrderFieldQuery = this.bulkUpdateListOrderFieldQuery;
        }
        if(pkUpdateListOrderFieldQuery != null) {
            mappingObject.pkUpdateListOrderFieldQuery = this.pkUpdateListOrderFieldQuery;
        }
        return mappingObject;
    }
    /**
     * INTERNAL:
     * This method is used to create a change record from comparing two aggregate collections.
     * Returns null when no change is detected; for a new owner the whole clone collection is
     * recorded as a change against an empty container.
     * @return ChangeRecord
     */
    @Override
    public ChangeRecord compareForChange(Object clone, Object backUp, ObjectChangeSet owner, AbstractSession session) {
        Object cloneAttribute = null;
        Object backUpAttribute = null;
        cloneAttribute = getAttributeValueFromObject(clone);
        if ((cloneAttribute != null) && (!getIndirectionPolicy().objectIsInstantiated(cloneAttribute))) {
            //If the clone's valueholder was not triggered then no changes were made.
            return null;
        }
        if (!owner.isNew()) {
            backUpAttribute = getAttributeValueFromObject(backUp);
            if ((backUpAttribute == null) && (cloneAttribute == null)) {
                return null;
            }
            ContainerPolicy cp = getContainerPolicy();
            Object backupCollection = null;
            Object cloneCollection = null;
            cloneCollection = getRealCollectionAttributeValueFromObject(clone, session);
            backupCollection = getRealCollectionAttributeValueFromObject(backUp, session);
            // A size difference is a change by definition -- no element comparison needed.
            if (cp.sizeFor(backupCollection) != cp.sizeFor(cloneCollection)) {
                return convertToChangeRecord(cloneCollection, backupCollection, owner, session);
            }
            boolean change = false;
            if (cp.isMapPolicy()){
                change = compareMapCollectionForChange((Map)cloneCollection, (Map)backupCollection, session);
            } else {
                // Positional comparison of the two equally-sized collections.
                Object cloneIterator = cp.iteratorFor(cloneCollection);
                Object backUpIterator = cp.iteratorFor(backupCollection);
                // For bug 2863721 must use a different UnitOfWorkChangeSet as here just
                // seeing if changes are needed. If changes are needed then a
                // real changeSet will be created later.
                UnitOfWorkChangeSet uowComparisonChangeSet = new UnitOfWorkChangeSet(session);
                while (cp.hasNext(cloneIterator)) {
                    Object cloneObject = cp.next(cloneIterator, session);
                    // For CR#2285 assume that if null is added the collection has changed.
                    if (cloneObject == null) {
                        change = true;
                        break;
                    }
                    Object backUpObject = null;
                    if (cp.hasNext(backUpIterator)) {
                        backUpObject = cp.next(backUpIterator, session);
                    } else {
                        // Clone has more elements than the backup at this position.
                        change = true;
                        break;
                    }
                    if (cloneObject.getClass().equals(backUpObject.getClass())) {
                        ObjectBuilder builder = getReferenceDescriptor(cloneObject.getClass(), session).getObjectBuilder();
                        ObjectChangeSet initialChanges = builder.createObjectChangeSet(cloneObject, uowComparisonChangeSet, owner.isNew(), session);
                        //compare for changes will return null if no change is detected and I need to remove the changeSet
                        ObjectChangeSet changes = builder.compareForChange(cloneObject, backUpObject, uowComparisonChangeSet, session);
                        if (changes != null) {
                            change = true;
                            break;
                        }
                    } else {
                        // Element class differs at the same position: treat as changed.
                        change = true;
                        break;
                    }
                }
                if (cp.hasNext(backUpIterator)){
                    // Backup has trailing elements the clone lacks.
                    change = true;
                }
            }
            if ((change == true)) {
                return convertToChangeRecord(cloneCollection, backupCollection, owner, session);
            } else {
                return null;
            }
        }
        // New owner: record everything in the clone collection against an empty container.
        return convertToChangeRecord(getRealCollectionAttributeValueFromObject(clone, session), containerPolicy.containerInstance(), owner, session);
    }
    /**
     * INTERNAL:
     * Determine if an AggregateCollection that is contained as a map has changed by comparing
     * the values in the clone to the values in the backup. Returns true on the first detected
     * difference: a key present only in one side, a value that became non-null, or a value
     * whose own change comparison reports changes.
     */
    protected boolean compareMapCollectionForChange(Map cloneObjectCollection, Map backUpCollection, AbstractSession session){
        HashMap originalKeyValues = new HashMap(10);
        Object backUpIter = containerPolicy.iteratorFor(backUpCollection);
        while (containerPolicy.hasNext(backUpIter)) {// Make a lookup of the objects
            Map.Entry entry = (Map.Entry)containerPolicy.nextEntry(backUpIter, session);
            originalKeyValues.put(entry.getKey(), entry.getValue());
        }
        // Throwaway change set: only used to detect whether any change exists (cf. bug 2863721).
        UnitOfWorkChangeSet uowComparisonChangeSet = new UnitOfWorkChangeSet(session);
        Object cloneIter = containerPolicy.iteratorFor(cloneObjectCollection);
        while (containerPolicy.hasNext(cloneIter)) {//Compare them with the objects from the clone
            Map.Entry wrappedFirstObject = (Map.Entry)containerPolicy.nextEntry(cloneIter, session);
            Object firstValue = wrappedFirstObject.getValue();
            Object firstKey = wrappedFirstObject.getKey();
            Object backupValue = originalKeyValues.get(firstKey);
            if (!originalKeyValues.containsKey(firstKey)) {
                // Key added in the clone.
                return true;
            } else if ((backupValue == null) && (firstValue != null)) {//the object was not in the backup
                return true;
            } else {
                ObjectBuilder builder = getReferenceDescriptor(firstValue.getClass(), session).getObjectBuilder();
                ObjectChangeSet changes = builder.compareForChange(firstValue, backupValue, uowComparisonChangeSet, session);
                if (changes != null) {
                    return true;
                } else {
                    // Unchanged entry: remove so leftovers signal backup-only keys.
                    originalKeyValues.remove(firstKey);
                }
            }
        }
        // Anything left in the lookup existed only in the backup -> changed.
        return !originalKeyValues.isEmpty();
    }
/**
* INTERNAL:
* Old and new lists are compared and only the changes are written to the database.
* Called only if listOrderField != null
*/
@Override
protected void compareListsAndWrite(List previousList, List currentList, WriteObjectQuery query) throws DatabaseException, OptimisticLockException {
if(this.isListOrderFieldUpdatable) {
compareListsAndWrite_UpdatableListOrderField(previousList, currentList, query);
} else {
compareListsAndWrite_NonUpdatableListOrderField(previousList, currentList, query);
}
}
/**
* INTERNAL:
* Old and new lists are compared and only the changes are written to the database.
* Called only if listOrderField != null
*/
protected void compareListsAndWrite_NonUpdatableListOrderField(List previousList, List currentList, WriteObjectQuery query) throws DatabaseException, OptimisticLockException {
boolean shouldRepairOrder = false;
if(currentList instanceof IndirectList) {
shouldRepairOrder = ((IndirectList)currentList).isListOrderBrokenInDb();
}
HashMap