/*
* Copyright (c) 1998, 2020 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2020 IBM Corporation. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0,
* or the Eclipse Distribution License v. 1.0 which is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
*/
// Contributors:
// Oracle - initial API and implementation from Oracle TopLink
// 07/19/2011-2.2.1 Guy Pelletier
// - 338812: ManyToMany mapping in aggregate object violate integrity constraint on deletion
// 08/01/2012-2.5 Chris Delahunt
// - 371950: Metadata caching
// 10/25/2012-2.5 Guy Pelletier
// - 374688: JPA 2.1 Converter support
// 09 Jan 2013-2.5 Gordon Yorke
// - 397772: JPA 2.1 Entity Graph Support
// 02/11/2013-2.5 Guy Pelletier
// - 365931: @JoinColumn(name="FK_DEPT",insertable = false, updatable = true) causes INSERT statement to include this data value that it is associated with
// 06/03/2013-2.5.1 Guy Pelletier
// - 402380: 3 jpa21/advanced tests failed on server with
// "java.lang.NoClassDefFoundError: org/eclipse/persistence/testing/models/jpa21/advanced/enums/Gender"
// 10/19/2016-2.6 Will Dazey
// - 506168: Make sure nestedTranslation map is new reference when cloned
// 03/22/2018-2.7.2 Lukas Jungmann
// - 441498: @ElementCollection on Map<@Embeddable,String> cause NullPointerException when @Embeddable has a FK
package org.eclipse.persistence.mappings;
import java.beans.PropertyChangeListener;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;
import org.eclipse.persistence.descriptors.ClassDescriptor;
import org.eclipse.persistence.descriptors.FetchGroupManager;
import org.eclipse.persistence.descriptors.changetracking.AttributeChangeTrackingPolicy;
import org.eclipse.persistence.descriptors.changetracking.DeferredChangeDetectionPolicy;
import org.eclipse.persistence.descriptors.changetracking.ObjectChangeTrackingPolicy;
import org.eclipse.persistence.exceptions.DatabaseException;
import org.eclipse.persistence.exceptions.DescriptorException;
import org.eclipse.persistence.exceptions.QueryException;
import org.eclipse.persistence.expressions.Expression;
import org.eclipse.persistence.internal.descriptors.DescriptorIterator;
import org.eclipse.persistence.internal.descriptors.ObjectBuilder;
import org.eclipse.persistence.internal.expressions.SQLSelectStatement;
import org.eclipse.persistence.internal.helper.DatabaseField;
import org.eclipse.persistence.internal.helper.DatabaseTable;
import org.eclipse.persistence.internal.identitymaps.CacheKey;
import org.eclipse.persistence.internal.queries.ContainerPolicy;
import org.eclipse.persistence.internal.queries.EntityFetchGroup;
import org.eclipse.persistence.internal.queries.JoinedAttributeManager;
import org.eclipse.persistence.internal.queries.MappedKeyMapContainerPolicy;
import org.eclipse.persistence.internal.sessions.AbstractRecord;
import org.eclipse.persistence.internal.sessions.AbstractSession;
import org.eclipse.persistence.internal.sessions.AggregateChangeRecord;
import org.eclipse.persistence.internal.sessions.ChangeRecord;
import org.eclipse.persistence.internal.sessions.MergeManager;
import org.eclipse.persistence.internal.sessions.ObjectChangeSet;
import org.eclipse.persistence.internal.sessions.UnitOfWorkChangeSet;
import org.eclipse.persistence.internal.sessions.UnitOfWorkImpl;
import org.eclipse.persistence.logging.SessionLog;
import org.eclipse.persistence.mappings.converters.Converter;
import org.eclipse.persistence.mappings.foundation.AbstractTransformationMapping;
import org.eclipse.persistence.mappings.foundation.MapKeyMapping;
import org.eclipse.persistence.mappings.querykeys.DirectQueryKey;
import org.eclipse.persistence.mappings.querykeys.QueryKey;
import org.eclipse.persistence.queries.DeleteObjectQuery;
import org.eclipse.persistence.queries.FetchGroup;
import org.eclipse.persistence.queries.FetchGroupTracker;
import org.eclipse.persistence.queries.ObjectBuildingQuery;
import org.eclipse.persistence.queries.ObjectLevelReadQuery;
import org.eclipse.persistence.queries.ReadAllQuery;
import org.eclipse.persistence.queries.ReadObjectQuery;
import org.eclipse.persistence.queries.ReadQuery;
import org.eclipse.persistence.queries.WriteObjectQuery;
import org.eclipse.persistence.sessions.DatabaseRecord;
import org.eclipse.persistence.sessions.Project;
/**
* Purpose: Two objects can be considered to be related by aggregation if there is a strict
* 1:1 relationship between the objects. This means that the target (child or owned) object
* cannot exist without the source (parent) object.
*
* In TopLink, it also means the data for the owned object is stored in the same table as
* the parent.
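*
* A minimal configuration sketch (the EmploymentPeriod class, attribute and
* column names below are illustrative, not part of this API):
* <pre>{@code
* AggregateObjectMapping periodMapping = new AggregateObjectMapping();
* periodMapping.setAttributeName("period");
* periodMapping.setReferenceClass(EmploymentPeriod.class);
* // Map the aggregate descriptor's fields onto columns of the owner's table.
* periodMapping.addFieldNameTranslation("EMP_START_DATE", "START_DATE");
* periodMapping.addFieldNameTranslation("EMP_END_DATE", "END_DATE");
* employeeDescriptor.addMapping(periodMapping);
* }</pre>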
*
* @author Sati
* @since TOPLink/Java 1.0
*/
public class AggregateObjectMapping extends AggregateMapping implements RelationalMapping, MapKeyMapping, EmbeddableMapping {
/**
* If all the fields in the database row for the aggregate object are NULL,
* then, by default, the mapping will place a null in the appropriate source object
* (as opposed to an aggregate object filled with nulls).
* To change this behavior, set the value of this variable to false. Then the mapping
* will build a new instance of the aggregate object that is filled with nulls
* and place it in the source object.
*/
protected boolean isNullAllowed;
protected DatabaseTable aggregateKeyTable = null;
/**
 * Map the name of a field in the aggregate descriptor to a field in the source table.
 * 322233 - changed to store the source DatabaseField to hold case and other column info.
 */
protected Map aggregateToSourceFields;
/**
* Map of nested attributes that need to apply an override name to a
* nested aggregate mapping's database field. The aggregate to source fields
* map is the existing EclipseLink functionality and works well when all
* embeddable mappings have unique database fields. This map adds a specific
* attribute-to-database-field override.
* @see #addFieldTranslation
*/
protected Map nestedFieldTranslations;
/**
* List of many to many mapping overrides to apply at initialize time to
* their cloned aggregate mappings.
*/
protected List overrideManyToManyMappings;
/**
* List of unidirectional one to many mapping overrides to apply at
* initialize time to their cloned aggregate mappings.
*/
protected List overrideUnidirectionalOneToManyMappings;
/**
* Map of converters to apply at initialize time to their cloned aggregate mappings.
*/
protected Map converters;
/**
* List of maps id mappings that need to be set to read-only at initialize
* time on their cloned aggregate mappings.
*/
protected List mapsIdMappings;
/**
* Default constructor.
*/
public AggregateObjectMapping() {
aggregateToSourceFields = new HashMap(5);
nestedFieldTranslations = new HashMap<>();
mapsIdMappings = new ArrayList<>();
overrideManyToManyMappings = new ArrayList<>();
overrideUnidirectionalOneToManyMappings = new ArrayList<>();
converters = new HashMap<>();
isNullAllowed = true;
}
/**
* INTERNAL:
*/
@Override
public boolean isRelationalMapping() {
return true;
}
/**
* INTERNAL:
* Used when initializing queries for mappings that use a Map.
* Called when the selection query is being initialized to add the fields for the map key to the query.
*/
@Override
public void addAdditionalFieldsToQuery(ReadQuery selectionQuery, Expression baseExpression){
for (DatabaseField field : getReferenceDescriptor().getAllFields()) {
if (selectionQuery.isObjectLevelReadQuery()) {
((ObjectLevelReadQuery)selectionQuery).addAdditionalField(baseExpression.getField(field));
} else if (selectionQuery.isDataReadQuery()) {
((SQLSelectStatement) selectionQuery.getSQLStatement()).addField(baseExpression.getField(field));
}
}
}
/**
* Add a converter to be applied to a mapping of the aggregate descriptor.
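*
* For example (a sketch; the GenderConverter class and the "gender"
* attribute name are assumptions, not part of this API):
* <pre>{@code
* aggregateMapping.addConverter(new GenderConverter(), "gender");
* }</pre>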
*/
@Override
public void addConverter(Converter converter, String attributeName) {
converters.put(attributeName, converter);
}
/**
* INTERNAL:
* Used when initializing queries for mappings that use a Map.
* Called when the insert query is being initialized to ensure the fields for the map key are in the insert query.
*/
@Override
public void addFieldsForMapKey(AbstractRecord joinRow){
for (DatabaseMapping mapping : getReferenceDescriptor().getMappings()) {
if (!mapping.isReadOnly()) {
for (DatabaseField field : mapping.getFields()) {
if (field.isUpdatable()){
joinRow.put(field, null);
}
}
}
}
}
/**
* PUBLIC:
* Add a field name translation that maps from a field name in the
* source table to a field name in the aggregate descriptor.
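*
* For example, to store the aggregate's START_DATE field in the owner's
* EMP_START_DATE column (both field names are illustrative):
* <pre>{@code
* mapping.addFieldNameTranslation("EMP_START_DATE", "START_DATE");
* }</pre>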
*/
public void addFieldNameTranslation(String sourceFieldName, String aggregateFieldName) {
// 322233 - changed to store the sourceField instead of sourceFieldName
addFieldTranslation(new DatabaseField(sourceFieldName), aggregateFieldName);
}
/**
* PUBLIC:
* Add a field translation that maps from a field in the
* source table to a field name in the aggregate descriptor.
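*
* For example (column name illustrative), this is equivalent to the
* name-based variant but carries full column metadata on the source side:
* <pre>{@code
* mapping.addFieldTranslation(new DatabaseField("EMP_START_DATE"), "START_DATE");
* }</pre>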
*/
@Override
public void addFieldTranslation(DatabaseField sourceField, String aggregateFieldName) {
//AggregateObjectMapping does not seem to support Aggregates on multiple tables
String unQualifiedAggregateFieldName = aggregateFieldName.substring(aggregateFieldName.lastIndexOf('.') + 1);// -1 is returned for no ".".
getAggregateToSourceFields().put(unQualifiedAggregateFieldName, sourceField);
}
/**
* INTERNAL:
* In JPA users may specify a maps id mapping on a shared embeddable
* descriptor. These mappings need to be set to read-only at initialize
* time, after the reference descriptor is cloned.
*/
public void addMapsIdMapping(DatabaseMapping mapping) {
mapsIdMappings.add(mapping);
}
/**
* INTERNAL:
* Add a nested field translation that maps from a field in the source table
* to a field name in a nested aggregate descriptor. These are handled
* slightly differently than regular field translations in that they are
* unique based on the attribute name. This solves the case where multiple
* nested embeddables have mappings to similarly named default columns.
*/
@Override
public void addNestedFieldTranslation(String attributeName, DatabaseField sourceField, String aggregateFieldName) {
// Aggregate field name is redundant here as we will look up the field
// through the attribute name. This method signature is to satisfy the
// Embeddable interface. AggregateCollectionMapping uses the aggregate
// field name.
nestedFieldTranslations.put(attributeName, new Object[]{sourceField, aggregateFieldName});
}
/**
* INTERNAL:
* In JPA users may specify overrides to apply to a many to many mapping
* on a shared embeddable descriptor. These settings are applied at
* initialize time, after the reference descriptor is cloned.
*/
@Override
public void addOverrideManyToManyMapping(ManyToManyMapping mapping) {
overrideManyToManyMappings.add(mapping);
}
/**
* INTERNAL:
* In JPA users may specify overrides to apply to a unidirectional one to
* many mapping on a shared embeddable descriptor. These settings are
* applied at initialize time, after the reference descriptor is cloned.
*/
@Override
public void addOverrideUnidirectionalOneToManyMapping(UnidirectionalOneToManyMapping mapping) {
overrideUnidirectionalOneToManyMappings.add(mapping);
}
/**
* INTERNAL:
* For mappings used as MapKeys in MappedKeyContainerPolicy: add the target of this mapping to the deleted
* objects list if necessary.
*
* This method is used for removal of private owned relationships.
* AggregateObjectMappings are dealt with in their parent delete, so this is a no-op.
*/
@Override
public void addKeyToDeletedObjectsList(Object object, Map deletedObjects){
}
/**
* INTERNAL:
* Return whether all the aggregate fields in the specified
* row are NULL.
*/
protected boolean allAggregateFieldsAreNull(AbstractRecord databaseRow) {
Vector fields = getReferenceFields();
int size = fields.size();
for (int index = 0; index < size; index++) {
DatabaseField field = (DatabaseField)fields.get(index);
Object value = databaseRow.get(field);
if (value != null) {
return false;
}
}
return true;
}
/**
* PUBLIC:
* If all the fields in the database row for the aggregate object are NULL,
* then, by default, the mapping will place a null in the appropriate source object
* (as opposed to an aggregate object filled with nulls). This behavior can be
* explicitly set by calling #allowNull().
* To change this behavior, call #dontAllowNull(). Then the mapping
* will build a new instance of the aggregate object that is filled with nulls
* and place it in the source object.
* In either situation, when writing, the mapping will place a NULL in all the
* fields in the database row for the aggregate object.
*
* Note: Any aggregate that has a relationship mapping automatically does not allow
* null.
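*
* For example (sketch):
* <pre>{@code
* addressMapping.allowNull();     // all-NULL columns read back as a null aggregate (default)
* addressMapping.dontAllowNull(); // all-NULL columns read back as an aggregate filled with nulls
* }</pre>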
*/
public void allowNull() {
setIsNullAllowed(true);
}
/**
* INTERNAL:
* Return whether the query's backup object has an attribute
* value of null.
*/
protected boolean backupAttributeValueIsNull(WriteObjectQuery query) {
if (query.getSession().isUnitOfWork()) {
Object backupAttributeValue = getAttributeValueFromObject(query.getBackupClone());
if (backupAttributeValue == null) {
return true;
}
}
return false;
}
/**
* INTERNAL:
* Clone and prepare the selection query as a nested batch read query.
* This is used for nested batch reading.
*/
public ObjectBuildingQuery prepareNestedQuery(ObjectBuildingQuery sourceQuery) {
if (sourceQuery.isObjectLevelReadQuery()) {
ObjectLevelReadQuery objectQuery = (ObjectLevelReadQuery)sourceQuery;
ObjectLevelReadQuery nestedObjectQuery = objectQuery.getAggregateQuery(this);
if (nestedObjectQuery != null) {
return nestedObjectQuery;
}
nestedObjectQuery = objectQuery;
String attributeName = getAttributeName();
if ((objectQuery.isPartialAttribute(attributeName))) {
// A nested query must be built to pass to the descriptor that looks like the real query execution would.
nestedObjectQuery = (ObjectLevelReadQuery)objectQuery.clone();
// Must cascade the nested partial/join expression and filter the nested ones.
if (objectQuery.hasPartialAttributeExpressions()) {
nestedObjectQuery.setPartialAttributeExpressions(extractNestedExpressions(objectQuery.getPartialAttributeExpressions(), nestedObjectQuery.getExpressionBuilder()));
}
}
if (objectQuery.isAttributeBatchRead(this.descriptor, attributeName)) {
if (nestedObjectQuery == objectQuery) {
// A nested query must be built to pass to the descriptor that looks like the real query execution would.
nestedObjectQuery = (ObjectLevelReadQuery)nestedObjectQuery.clone();
}
// Must carry over properties for batching to work.
nestedObjectQuery.setProperties(objectQuery.getProperties());
// Computed nested batch attribute expressions.
nestedObjectQuery.getBatchFetchPolicy().setAttributeExpressions(extractNestedExpressions(objectQuery.getBatchReadAttributeExpressions(), nestedObjectQuery.getExpressionBuilder()));
nestedObjectQuery.computeBatchReadAttributes();
}
FetchGroup parentQueryFetchGroup = sourceQuery.getExecutionFetchGroup(this.descriptor);
if (parentQueryFetchGroup != null) {
if (nestedObjectQuery == objectQuery) {
// A nested query must be built to pass to the descriptor that looks like the real query execution would.
nestedObjectQuery = (ObjectLevelReadQuery)nestedObjectQuery.clone();
}
FetchGroup targetFetchGroup = parentQueryFetchGroup.getGroup(getAttributeName());
if (targetFetchGroup != null && sourceQuery.getDescriptor().hasFetchGroupManager()) {
//if the parent object has a fetchgroup manager then aggregates can support a fetchgroup manager
nestedObjectQuery.setFetchGroup(targetFetchGroup);
} else {
targetFetchGroup = null;
nestedObjectQuery.setFetchGroup(null);
nestedObjectQuery.setFetchGroupName(null);
}
}
if (nestedObjectQuery != sourceQuery) {
objectQuery.setAggregateQuery(this, nestedObjectQuery);
return nestedObjectQuery;
}
}
return sourceQuery;
}
/**
* INTERNAL:
* Build and return an aggregate object from the specified row.
* If a null value is allowed and all the appropriate fields in the row are NULL, return a null.
* If an aggregate is referenced by the target object, return it (maintain identity)
* Otherwise, simply create a new aggregate object and return it.
*/
public Object buildAggregateFromRow(AbstractRecord databaseRow, Object targetObject, CacheKey cacheKey, JoinedAttributeManager joinManager, ObjectBuildingQuery sourceQuery, boolean buildShallowOriginal, AbstractSession executionSession, boolean targetIsProtected) throws DatabaseException {
if (databaseRow.hasSopObject()) {
Object sopAggregate = getAttributeValueFromObject(databaseRow.getSopObject());
if ((targetObject != null) && (targetObject != databaseRow.getSopObject())) {
setAttributeValueInObject(targetObject, sopAggregate);
}
return sopAggregate;
}
// check for all NULLs
if (isNullAllowed() && allAggregateFieldsAreNull(databaseRow)) {
return null;
}
// maintain object identity (even if not refreshing) if target object references the aggregate
// if aggregate is not referenced by the target object, construct a new aggregate
Object aggregate = null;
ClassDescriptor descriptor = getReferenceDescriptor();
boolean refreshing = true;
if (targetObject != null){
if (descriptor.hasInheritance()) {
Class newAggregateClass = descriptor.getInheritancePolicy().classFromRow(databaseRow, executionSession);
descriptor = getReferenceDescriptor(newAggregateClass, executionSession);
aggregate = getMatchingAttributeValueFromObject(databaseRow, targetObject, executionSession, descriptor);
if ((aggregate != null) && (aggregate.getClass() != newAggregateClass)) {
// if the class has changed out from underneath us, we cannot preserve object identity
// build a new instance of the *new* class
aggregate = descriptor.getObjectBuilder().buildNewInstance();
refreshing = false;
}
} else {
aggregate = getMatchingAttributeValueFromObject(databaseRow, targetObject, executionSession, descriptor);
}
}
// Build a new aggregate if the target object does not reference an existing aggregate.
// EL Bug 474956 - build a new aggregate if the target object references an existing aggregate, and
// the passed cacheKey is null from the invalidation of the target object in the IdentityMap.
if (aggregate == null || cacheKey == null) {
aggregate = descriptor.getObjectBuilder().buildNewInstance();
refreshing = false;
}
ObjectBuildingQuery nestedQuery = prepareNestedQuery(sourceQuery);
FetchGroup targetFetchGroup = null;
if (nestedQuery.isObjectLevelReadQuery()) {
targetFetchGroup = ((ObjectLevelReadQuery)nestedQuery).getFetchGroup();
if (refreshing && descriptor.hasFetchGroupManager()) {
descriptor.getFetchGroupManager().unionEntityFetchGroupIntoObject(aggregate, descriptor.getFetchGroupManager().getEntityFetchGroup(targetFetchGroup), executionSession, true);
//merge fetchgroup into aggregate fetchgroup that may have been there from previous read.
}
}
if (buildShallowOriginal) {
descriptor.getObjectBuilder().buildAttributesIntoShallowObject(aggregate, databaseRow, nestedQuery);
} else if (executionSession.isUnitOfWork()) {
descriptor.getObjectBuilder().buildAttributesIntoWorkingCopyClone(aggregate, buildWrapperCacheKeyForAggregate(cacheKey, targetIsProtected), nestedQuery, joinManager, databaseRow, (UnitOfWorkImpl)executionSession, refreshing);
} else {
descriptor.getObjectBuilder().buildAttributesIntoObject(aggregate, buildWrapperCacheKeyForAggregate(cacheKey, targetIsProtected), databaseRow, nestedQuery, joinManager, nestedQuery.getExecutionFetchGroup(descriptor), refreshing, executionSession);
}
if ((targetFetchGroup != null) && descriptor.hasFetchGroupManager() && cacheKey != null
&& !refreshing && sourceQuery.shouldMaintainCache() && !sourceQuery.shouldStoreBypassCache()) {
// Set the fetch group to the domain object, after built.
EntityFetchGroup entityFetchGroup = descriptor.getFetchGroupManager().getEntityFetchGroup(targetFetchGroup);
if (entityFetchGroup != null) {
entityFetchGroup = (EntityFetchGroup)entityFetchGroup.clone();
entityFetchGroup.setRootEntity((FetchGroupTracker) cacheKey.getObject());
entityFetchGroup.setOnEntity(aggregate, executionSession);
}
}
return aggregate;
}
/**
* INTERNAL:
* Wrap the aggregate represented by this mapping in a CacheKey so it can be processed by
* methods down the stack.
* @param owningCacheKey - the cache key holding the object to extract the aggregate from
* @return a CacheKey wrapping the aggregate, or the owning CacheKey when no wrapping is required
*/
protected CacheKey buildWrapperCacheKeyForAggregate(CacheKey owningCacheKey, boolean targetIsProtected) {
if (!this.descriptor.getCachePolicy().isProtectedIsolation()) {
return owningCacheKey;
}
if (!targetIsProtected || this.isMapKeyMapping || (owningCacheKey == null)) {
return owningCacheKey;
}
CacheKey aggregateKey = owningCacheKey;
Object object = owningCacheKey.getObject();
if (object != null) {
Object aggregate = getAttributeValueFromObject(object);
aggregateKey = new CacheKey(null, aggregate, null);
aggregateKey.setProtectedForeignKeys(owningCacheKey.getProtectedForeignKeys());
aggregateKey.setRecord(owningCacheKey.getRecord());
aggregateKey.setIsolated(owningCacheKey.isIsolated());
aggregateKey.setReadTime(owningCacheKey.getReadTime());
}
return aggregateKey;
}
/**
* INTERNAL:
* Write null values for all aggregate fields into the parent row.
*/
protected void writeNullReferenceRow(AbstractRecord record) {
List fields = getReferenceFields();
int size = fields.size();
boolean nullInserted = false;
for (int index = 0; index < size; index++) {
DatabaseField field = fields.get(index);
// EL Bug 393520
if (!field.isReadOnly() && (field.isUpdatable() || field.isInsertable())) {
record.put(field, null);
nullInserted = true;
}
}
if (size > 0 && nullInserted) {
// EL Bug 319759 - if a field is null, then the update call cache should not be used
record.setNullValueInFields(true);
}
}
/**
* INTERNAL:
* Used to allow object level comparisons.
* Because an Aggregate has no primary key, this must do an attribute-by-attribute
* comparison.
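*
* For example, a criteria such as the following (sketch; the "period"
* attribute is illustrative) is expanded into an AND of per-attribute equalities:
* <pre>{@code
* ExpressionBuilder emp = new ExpressionBuilder();
* Expression criteria = emp.get("period").equal(somePeriod);
* }</pre>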
*/
@Override
public Expression buildObjectJoinExpression(Expression expression, Object value, AbstractSession session) {
Expression attributeByAttributeComparison = null;
Expression join = null;
Object attributeValue = null;
// value need not be unwrapped as it is an aggregate, nor should it
// influence a call to getReferenceDescriptor.
ClassDescriptor referenceDescriptor = getReferenceDescriptor();
if ((value != null) && !referenceDescriptor.getJavaClass().isInstance(value)) {
throw QueryException.incorrectClassForObjectComparison(expression, value, this);
}
Enumeration mappings = referenceDescriptor.getMappings().elements();
for (; mappings.hasMoreElements();) {
DatabaseMapping mapping = (DatabaseMapping)mappings.nextElement();
if (value == null) {
attributeValue = null;
} else {
attributeValue = mapping.getAttributeValueFromObject(value);
}
join = expression.get(mapping.getAttributeName()).equal(attributeValue);
if (attributeByAttributeComparison == null) {
attributeByAttributeComparison = join;
} else {
attributeByAttributeComparison = attributeByAttributeComparison.and(join);
}
}
return attributeByAttributeComparison;
}
/**
* INTERNAL:
* Used to allow object level comparisons.
*/
@Override
public Expression buildObjectJoinExpression(Expression expression, Expression argument, AbstractSession session) {
Expression attributeByAttributeComparison = null;
//Enumeration mappingsEnum = getSourceToTargetKeyFields().elements();
Enumeration mappingsEnum = getReferenceDescriptor().getMappings().elements();
for (; mappingsEnum.hasMoreElements();) {
DatabaseMapping mapping = (DatabaseMapping)mappingsEnum.nextElement();
String attributeName = mapping.getAttributeName();
Expression join = expression.get(attributeName).equal(argument.get(attributeName));
if (attributeByAttributeComparison == null) {
attributeByAttributeComparison = join;
} else {
attributeByAttributeComparison = attributeByAttributeComparison.and(join);
}
}
return attributeByAttributeComparison;
}
/**
* INTERNAL:
* Write the aggregate values into the parent row.
*/
protected void writeToRowFromAggregate(AbstractRecord record, Object object, Object attributeValue, AbstractSession session, WriteType writeType) throws DescriptorException {
if (attributeValue == null) {
if (this.isNullAllowed) {
writeNullReferenceRow(record);
} else {
throw DescriptorException.nullForNonNullAggregate(object, this);
}
} else {
if (!session.isClassReadOnly(attributeValue.getClass())) {
getObjectBuilder(attributeValue, session).buildRow(record, attributeValue, session, writeType);
}
}
}
/**
* INTERNAL:
* Write the aggregate values into the parent row for shallow insert.
*/
protected void writeToRowFromAggregateForShallowInsert(AbstractRecord record, Object object, Object attributeValue, AbstractSession session) throws DescriptorException {
if (attributeValue == null) {
if (this.isNullAllowed) {
writeNullReferenceRow(record);
} else {
throw DescriptorException.nullForNonNullAggregate(object, this);
}
} else {
if (!session.isClassReadOnly(attributeValue.getClass())) {
getObjectBuilder(attributeValue, session).buildRowForShallowInsert(record, attributeValue, session);
}
}
}
/**
* INTERNAL:
* Write the aggregate values into the parent row for update after shallow insert.
*/
protected void writeToRowFromAggregateForUpdateAfterShallowInsert(AbstractRecord record, Object object, Object attributeValue, AbstractSession session, DatabaseTable table) throws DescriptorException {
if (attributeValue == null) {
if (!this.isNullAllowed) {
throw DescriptorException.nullForNonNullAggregate(object, this);
}
} else {
if (!session.isClassReadOnly(attributeValue.getClass()) && !isPrimaryKeyMapping()) {
getObjectBuilder(attributeValue, session).buildRowForUpdateAfterShallowInsert(record, attributeValue, session, table);
}
}
}
/**
* INTERNAL:
* Write the aggregate values into the parent row for update before shallow delete.
*/
protected void writeToRowFromAggregateForUpdateBeforeShallowDelete(AbstractRecord record, Object object, Object attributeValue, AbstractSession session, DatabaseTable table) throws DescriptorException {
if (attributeValue == null) {
if (!this.isNullAllowed) {
throw DescriptorException.nullForNonNullAggregate(object, this);
}
} else {
if (!session.isClassReadOnly(attributeValue.getClass()) && !isPrimaryKeyMapping()) {
getObjectBuilder(attributeValue, session).buildRowForUpdateBeforeShallowDelete(record, attributeValue, session, table);
}
}
}
/**
* INTERNAL:
* Write into the parent row the values from the specified
* object change set.
*/
protected void writeToRowFromAggregateWithChangeRecord(AbstractRecord record, ChangeRecord changeRecord, ObjectChangeSet objectChangeSet, AbstractSession session, WriteType writeType) throws DescriptorException {
if (objectChangeSet == null) {
if (this.isNullAllowed) {
writeNullReferenceRow(record);
} else {
Object object = ((ObjectChangeSet)changeRecord.getOwner()).getUnitOfWorkClone();
throw DescriptorException.nullForNonNullAggregate(object, this);
}
} else {
if (!session.isClassReadOnly(objectChangeSet.getClassType(session))) {
getReferenceDescriptor(objectChangeSet.getClassType(session), session).getObjectBuilder().buildRowWithChangeSet(record, objectChangeSet, session, writeType);
}
}
}
/**
* INTERNAL:
* Write into the parent row the changed values from
* the specified attribute value.
*/
protected void writeToRowFromAggregateForUpdate(AbstractRecord record, WriteObjectQuery query, Object attributeValue) throws DescriptorException {
if (attributeValue == null) {
if (this.isNullAllowed) {
if (backupAttributeValueIsNull(query)) {
// both attributes are null - no update required
} else {
writeNullReferenceRow(record);
}
} else {
throw DescriptorException.nullForNonNullAggregate(query.getObject(), this);
}
} else if ((query.getBackupClone() != null) && ((getMatchingBackupAttributeValue(query, attributeValue) == null) || !(attributeValue.getClass().equals(getMatchingBackupAttributeValue(query, attributeValue).getClass())))) {
getObjectBuilder(attributeValue, query.getSession()).buildRow(record, attributeValue, query.getSession(), WriteType.UPDATE);
} else {
if (!query.getSession().isClassReadOnly(attributeValue.getClass())) {
WriteObjectQuery clonedQuery = (WriteObjectQuery)query.clone();
clonedQuery.setObject(attributeValue);
if (query.getSession().isUnitOfWork()) {
Object backupAttributeValue = getMatchingBackupAttributeValue(query, attributeValue);
if (backupAttributeValue == null) {
backupAttributeValue = getObjectBuilder(attributeValue, query.getSession()).buildNewInstance();
}
clonedQuery.setBackupClone(backupAttributeValue);
}
getObjectBuilder(attributeValue, query.getSession()).buildRowForUpdate(record, clonedQuery);
}
}
}
/**
* INTERNAL:
* Clone the attribute from the original and assign it to the clone.
*/
@Override
public void buildClone(Object original, CacheKey cacheKey, Object clone, Integer refreshCascade, AbstractSession cloningSession) {
Object attributeValue = getAttributeValueFromObject(original);
Object aggregateClone = buildClonePart(original, clone, cacheKey, attributeValue, refreshCascade, cloningSession);
if (aggregateClone != null && cloningSession.isUnitOfWork()) {
ClassDescriptor descriptor = getReferenceDescriptor(aggregateClone, cloningSession);
descriptor.getObjectChangePolicy().setAggregateChangeListener(clone, aggregateClone, (UnitOfWorkImpl)cloningSession, descriptor, getAttributeName());
}
setAttributeValueInObject(clone, aggregateClone);
}
/**
* INTERNAL:
* Build a clone of the given element in a unit of work.
*/
@Override
public Object buildElementClone(Object attributeValue, Object parent, CacheKey parentCacheKey, Integer refreshCascade, AbstractSession cloningSession, boolean isExisting, boolean isFromSharedCache){
Object aggregateClone = buildClonePart(attributeValue, parent, parentCacheKey, refreshCascade, cloningSession, !isExisting);
if (aggregateClone != null && cloningSession.isUnitOfWork()) {
ClassDescriptor descriptor = getReferenceDescriptor(aggregateClone, cloningSession);
descriptor.getObjectChangePolicy().setAggregateChangeListener(parent, aggregateClone, (UnitOfWorkImpl)cloningSession, descriptor, getAttributeName());
}
return aggregateClone;
}
/**
* INTERNAL:
* Set the change listener in the aggregate.
*/
@Override
public void setChangeListener(Object clone, PropertyChangeListener listener, UnitOfWorkImpl uow) {
Object attributeValue = getAttributeValueFromObject(clone);
if (attributeValue != null) {
ClassDescriptor descriptor = getReferenceDescriptor(attributeValue, uow);
descriptor.getObjectChangePolicy().setAggregateChangeListener(clone, attributeValue, uow, descriptor, getAttributeName());
}
}
/**
* INTERNAL:
* A combination of readFromRowIntoObject and buildClone.
*
* buildClone assumes the attribute value exists on the original and can
* simply be copied.
*
* readFromRowIntoObject assumes that one is building an original.
*
* Both of the above assumptions are false in this method, which actually
* attempts to do both at the same time.
*
* Extract value from the row and set the attribute to this value in the
* working copy clone.
* In order to bypass the shared cache when in transaction a UnitOfWork must
* be able to populate working copies directly from the row.
*/
@Override
public void buildCloneFromRow(AbstractRecord databaseRow, JoinedAttributeManager joinManager, Object clone, CacheKey sharedCacheKey, ObjectBuildingQuery sourceQuery, UnitOfWorkImpl unitOfWork, AbstractSession executionSession) {
// This method is a combination of buildAggregateFromRow and buildClonePart on the super class.
// None of buildClonePart is used, as this is not an original new object, nor do we worry about creating heavy clones for aggregate objects.
// Ensure that the shared CacheKey is passed, as this will be set to null for a refresh of an invalid object.
Object clonedAttributeValue = buildAggregateFromRow(databaseRow, clone, sharedCacheKey, joinManager, sourceQuery, false, executionSession, true);
if (clonedAttributeValue != null) {
ClassDescriptor descriptor = getReferenceDescriptor(clonedAttributeValue, unitOfWork);
descriptor.getObjectChangePolicy().setAggregateChangeListener(clone, clonedAttributeValue, unitOfWork, descriptor, getAttributeName());
}
setAttributeValueInObject(clone, clonedAttributeValue);
}
/**
* INTERNAL:
* Builds a shallow original object. Only direct attributes and primary
* keys are populated. In this way the minimum original required for
* instantiating a working copy clone can be built without placing it in
* the shared cache (no concern over cycles).
*/
@Override
public void buildShallowOriginalFromRow(AbstractRecord databaseRow, Object original, JoinedAttributeManager joinManager, ObjectBuildingQuery sourceQuery, AbstractSession executionSession) {
Object aggregate = buildAggregateFromRow(databaseRow, original, null, joinManager, sourceQuery, true, executionSession, true);// shallow only.
setAttributeValueInObject(original, aggregate);
}
/**
* INTERNAL:
* Certain key mappings favor different types of selection query. Return the appropriate
* type of selection query.
*/
@Override
public ReadQuery buildSelectionQueryForDirectCollectionKeyMapping(ContainerPolicy containerPolicy){
ReadAllQuery query = new ReadAllQuery();
query.setReferenceClass(referenceClass);
query.setDescriptor(getReferenceDescriptor());
query.setContainerPolicy(containerPolicy);
return query;
}
/**
* INTERNAL:
* Build and return a "template" database row with all the fields
* set to null.
*/
protected AbstractRecord buildTemplateInsertRow(AbstractSession session) {
AbstractRecord result = getReferenceDescriptor().getObjectBuilder().buildTemplateInsertRow(session);
List processedMappings = (List)getReferenceDescriptor().getMappings().clone();
if (getReferenceDescriptor().hasInheritance()) {
for (ClassDescriptor child : getReferenceDescriptor().getInheritancePolicy().getChildDescriptors()) {
for (DatabaseMapping mapping : child.getMappings()) {
// Only write mappings once.
if (!processedMappings.contains(mapping)) {
mapping.writeInsertFieldsIntoRow(result, session);
processedMappings.add(mapping);
}
}
}
}
return result;
}
/**
* INTERNAL:
* Cascade discover and persist new objects during commit to the map key.
*/
@Override
public void cascadeDiscoverAndPersistUnregisteredNewObjects(Object object, Map newObjects, Map unregisteredExistingObjects, Map visitedObjects, UnitOfWorkImpl uow, boolean getAttributeValueFromObject, Set cascadeErrors){
ObjectBuilder builder = getReferenceDescriptor(object.getClass(), uow).getObjectBuilder();
builder.cascadeDiscoverAndPersistUnregisteredNewObjects(object, newObjects, unregisteredExistingObjects, visitedObjects, uow, cascadeErrors);
}
/**
* INTERNAL:
* Cascade perform delete through mappings that require the cascade
*/
@Override
public void cascadePerformRemoveIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects, boolean getAttributeValueFromObject) {
Object objectReferenced = null;
if (getAttributeValueFromObject){
// Objects referenced by this mapping are not registered as they have
// no identity; however, mappings from the referenced object may need cascading.
objectReferenced = getAttributeValueFromObject(object);
} else {
objectReferenced = object;
}
if (objectReferenced == null) {
return;
}
if (!visitedObjects.containsKey(objectReferenced)) {
visitedObjects.put(objectReferenced, objectReferenced);
ObjectBuilder builder = getReferenceDescriptor(objectReferenced.getClass(), uow).getObjectBuilder();
builder.cascadePerformRemove(objectReferenced, uow, visitedObjects);
}
}
/**
* INTERNAL:
* Cascade perform delete through mappings that require the cascade
*/
@Override
public void cascadePerformRemoveIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects) {
cascadePerformRemoveIfRequired(object, uow, visitedObjects, true);
}
/**
* INTERNAL:
* Cascade perform removal of orphaned private owned objects from the UnitOfWorkChangeSet
*/
@Override
public void cascadePerformRemovePrivateOwnedObjectFromChangeSetIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects) {
Object attributeValue = getAttributeValueFromObject(object);
if (attributeValue == null) {
return;
}
if (!visitedObjects.containsKey(attributeValue)) {
visitedObjects.put(attributeValue, attributeValue);
ObjectBuilder builder = getReferenceDescriptor(attributeValue, uow).getObjectBuilder();
// cascade perform remove any related objects via ObjectBuilder for an aggregate object
builder.cascadePerformRemovePrivateOwnedObjectFromChangeSet(attributeValue, uow, visitedObjects);
}
}
/**
* INTERNAL:
* Cascade registerNew for Create through mappings that require the cascade
*/
@Override
public void cascadeRegisterNewIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects, boolean getAttributeValueFromObject) {
Object objectReferenced = null;
// Aggregate objects are not registered, but their mappings should be.
if (getAttributeValueFromObject){
objectReferenced = getAttributeValueFromObject(object);
} else {
objectReferenced = object;
}
if (objectReferenced == null) {
return;
}
if (!visitedObjects.containsKey(objectReferenced)) {
visitedObjects.put(objectReferenced, objectReferenced);
ObjectBuilder builder = getReferenceDescriptor(objectReferenced.getClass(), uow).getObjectBuilder();
builder.cascadeRegisterNewForCreate(objectReferenced, uow, visitedObjects);
}
}
/**
* INTERNAL:
* Cascade registerNew for Create through mappings that require the cascade
*/
@Override
public void cascadeRegisterNewIfRequired(Object object, UnitOfWorkImpl uow, Map visitedObjects) {
cascadeRegisterNewIfRequired(object, uow, visitedObjects, true);
}
/**
* INTERNAL:
* Clone the aggregate to source field names. AggregateCollectionMapping
* needs each nested embedded mapping to have its own list of aggregate
* to source field names so that it can apply nested override names to
* shared aggregate object mappings.
*/
@Override
public Object clone() {
AggregateObjectMapping mappingObject = (AggregateObjectMapping) super.clone();
Map aggregateToSourceFields = new HashMap<>();
aggregateToSourceFields.putAll(getAggregateToSourceFields());
mappingObject.setAggregateToSourceFields(aggregateToSourceFields);
Map nestedTranslations = new HashMap<>();
nestedTranslations.putAll(getNestedFieldTranslations());
mappingObject.setNestedFieldTranslations(nestedTranslations);
return mappingObject;
}
/**
* INTERNAL:
* Return the fields handled by the mapping.
*/
@Override
protected Vector collectFields() {
return getReferenceFields();
}
/**
* INTERNAL:
* Aggregates order by all their fields by default.
*/
@Override
public List getOrderByNormalizedExpressions(Expression base) {
List orderBys = new ArrayList(this.fields.size());
for (DatabaseField field : this.fields) {
orderBys.add(base.getField(field));
}
return orderBys;
}
/**
* INTERNAL:
* This method is used to store the FK fields that can be cached and that correspond to noncacheable mappings.
* The FK field values will be used to re-issue the query when cloning the shared cache entity.
*/
@Override
public void collectQueryParameters(Set record){
for (DatabaseMapping mapping : getReferenceDescriptor().getMappings()){
if ((mapping.isForeignReferenceMapping() && !mapping.isCacheable()) || (mapping.isAggregateObjectMapping() && mapping.getReferenceDescriptor().hasNoncacheableMappings())){
mapping.collectQueryParameters(record);
}
}
}
/**
* INTERNAL:
* Convert all the class-name-based settings in this mapping to actual
* class-based settings. This method is used when converting a project that
* has been built with class names to a project with classes.
* @param classLoader
*/
@Override
public void convertClassNamesToClasses(ClassLoader classLoader) {
super.convertClassNamesToClasses(classLoader);
for (Converter converter : converters.values()) {
// Convert any Converter class names.
convertConverterClassNamesToClasses(converter, classLoader);
}
}
/**
* INTERNAL:
* Called when a DatabaseMapping is used to map the key in a collection. Returns the key.
*/
@Override
public Object createMapComponentFromRow(AbstractRecord dbRow, ObjectBuildingQuery query, CacheKey parentCacheKey, AbstractSession session, boolean isTargetProtected){
Object key = buildAggregateFromRow(dbRow, null, parentCacheKey, null, query, false, session, isTargetProtected);
return key;
}
/**
* INTERNAL:
* Creates the Array of simple types used to recreate this map.
*/
@Override
public Object createSerializableMapKeyInfo(Object key, AbstractSession session){
return key; // Embeddables have no identity so they are not reduced to PK.
}
/**
* INTERNAL:
* Create an instance of the Key object from the key information extracted from the map.
* This may return the value directly in case of a simple key or will be used as the FK to load a related entity.
*/
@Override
public List