/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.loader;
import java.io.Serializable;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.hibernate.AssertionFailure;
import org.hibernate.HibernateException;
import org.hibernate.LockMode;
import org.hibernate.LockOptions;
import org.hibernate.QueryException;
import org.hibernate.ScrollMode;
import org.hibernate.Session;
import org.hibernate.StaleObjectStateException;
import org.hibernate.WrongClassException;
import org.hibernate.bytecode.enhance.spi.interceptor.EnhancementAsProxyLazinessInterceptor;
import org.hibernate.cache.spi.FilterKey;
import org.hibernate.cache.spi.QueryKey;
import org.hibernate.cache.spi.QueryResultsCache;
import org.hibernate.cache.spi.access.EntityDataAccess;
import org.hibernate.cache.spi.entry.CacheEntry;
import org.hibernate.cache.spi.entry.ReferenceCacheEntryImpl;
import org.hibernate.collection.spi.PersistentCollection;
import org.hibernate.dialect.Dialect;
import org.hibernate.dialect.pagination.LimitHandler;
import org.hibernate.dialect.pagination.LimitHelper;
import org.hibernate.dialect.pagination.NoopLimitHandler;
import org.hibernate.engine.internal.CacheHelper;
import org.hibernate.engine.internal.TwoPhaseLoad;
import org.hibernate.engine.jdbc.ColumnNameCache;
import org.hibernate.engine.jdbc.spi.JdbcCoordinator;
import org.hibernate.engine.jdbc.spi.JdbcServices;
import org.hibernate.engine.loading.internal.CollectionLoadContext;
import org.hibernate.engine.spi.BatchFetchQueue;
import org.hibernate.engine.spi.EntityEntry;
import org.hibernate.engine.spi.EntityKey;
import org.hibernate.engine.spi.EntityUniqueKey;
import org.hibernate.engine.spi.PersistenceContext;
import org.hibernate.engine.spi.PersistentAttributeInterceptable;
import org.hibernate.engine.spi.PersistentAttributeInterceptor;
import org.hibernate.engine.spi.QueryParameters;
import org.hibernate.engine.spi.RowSelection;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.engine.spi.SharedSessionContractImplementor;
import org.hibernate.engine.spi.SubselectFetch;
import org.hibernate.engine.spi.TypedValue;
import org.hibernate.event.service.spi.EventListenerRegistry;
import org.hibernate.event.spi.EventSource;
import org.hibernate.event.spi.EventType;
import org.hibernate.event.spi.PostLoadEvent;
import org.hibernate.event.spi.PreLoadEvent;
import org.hibernate.event.spi.PreLoadEventListener;
import org.hibernate.hql.internal.HolderInstantiator;
import org.hibernate.internal.CoreLogging;
import org.hibernate.internal.CoreMessageLogger;
import org.hibernate.internal.FetchingScrollableResultsImpl;
import org.hibernate.internal.ScrollableResultsImpl;
import org.hibernate.internal.util.StringHelper;
import org.hibernate.internal.util.collections.CollectionHelper;
import org.hibernate.loader.spi.AfterLoadAction;
import org.hibernate.persister.collection.CollectionPersister;
import org.hibernate.persister.entity.EntityPersister;
import org.hibernate.persister.entity.Loadable;
import org.hibernate.persister.entity.UniqueKeyLoadable;
import org.hibernate.pretty.MessageHelper;
import org.hibernate.proxy.HibernateProxy;
import org.hibernate.query.spi.ScrollableResultsImplementor;
import org.hibernate.stat.spi.StatisticsImplementor;
import org.hibernate.transform.CacheableResultTransformer;
import org.hibernate.transform.ResultTransformer;
import org.hibernate.type.AssociationType;
import org.hibernate.type.EntityType;
import org.hibernate.type.Type;
import org.hibernate.type.VersionType;
/**
* Abstract superclass of object loading (and querying) strategies. This class implements
* useful common functionality that concrete loaders delegate to. It is not intended that this
* functionality would be directly accessed by client code. (Hence, all methods of this class
* are declared protected or private.) This class relies heavily upon the
* Loadable interface, which is the contract between this class and
* EntityPersisters that may be loaded by it.
*
* The present implementation is able to load any number of columns of entities and at most
* one collection role per query.
*
* @author Gavin King
* @see org.hibernate.persister.entity.Loadable
*/
public abstract class Loader {
public static final String SELECT = "select";
public static final String SELECT_DISTINCT = "select distinct";
protected static final CoreMessageLogger LOG = CoreLogging.messageLogger( Loader.class );
// The owning session factory; assigned once in the constructor and never changed.
private final SessionFactoryImplementor factory;
// Lazily-built cache of ResultSet column-name lookups; volatile for safe publication across threads.
private volatile ColumnNameCache columnNameCache;
// Snapshot of SessionFactoryOptions#isEnhancementAsProxyEnabled taken at construction time.
private final boolean enhancementAsProxyEnabled;
// Optimistically assume JDBC4 support; presumably cleared elsewhere if a JDBC4-only
// driver call fails — TODO confirm against the code that reads/writes this flag.
private boolean isJdbc4 = true;
/**
 * Creates a loader bound to the given session factory.
 *
 * @param factory the owning session factory; its "enhancement as proxy"
 *                option is read once here and cached for the loader's lifetime
 */
public Loader(SessionFactoryImplementor factory) {
	this.enhancementAsProxyEnabled = factory.getSessionFactoryOptions().isEnhancementAsProxyEnabled();
	this.factory = factory;
}
/**
 * The SQL query string to be called; implemented by all subclasses
 *
 * @return The sql command this loader should use to get its {@link ResultSet}.
 */
public abstract String getSQLString();

/**
 * An array of persisters of entity classes contained in each row of results;
 * implemented by all subclasses
 *
 * @return The entity persisters.
 */
protected abstract Loadable[] getEntityPersisters();

/**
 * An array indicating whether the entities have eager property fetching
 * enabled.
 *
 * @return Eager property fetching indicators, or {@code null} (the default)
 *         when no entity uses eager property fetching.
 */
protected boolean[] getEntityEagerPropertyFetches() {
	return null;
}

/**
 * An array of indexes of the entity that owns a one-to-one association
 * to the entity at the given index (-1 if there is no "owner"). The
 * indexes contained here are relative to the result of
 * {@link #getEntityPersisters}.
 *
 * @return The owner indicators (see discussion above), or {@code null}
 *         (the default) when this loader tracks no owners.
 */
protected int[] getOwners() {
	return null;
}

/**
 * An array of the owner types corresponding to the {@link #getOwners()}
 * returns. Indices indicating no owner would be null here.
 *
 * @return The types for the owners, or {@code null} (the default) when none apply.
 */
protected EntityType[] getOwnerAssociationTypes() {
	return null;
}

/**
 * An (optional) persister for a collection to be initialized; only
 * collection loaders return a non-null value.
 */
protected CollectionPersister[] getCollectionPersisters() {
	return null;
}

/**
 * Get the index of the entity that owns the collection, or -1
 * if there is no owner in the query results (ie. in the case of a
 * collection initializer) or no collection.
 */
protected int[] getCollectionOwners() {
	return null;
}

// Per-persister indices of key-many-to-one targets inside composite identifiers;
// {@code null} (the default) when no composite key contains a key-many-to-one.
protected int[][] getCompositeKeyManyToOneTargetIndices() {
	return null;
}

/**
 * What lock options does this load entities with?
 *
 * @param lockOptions a collection of lock options specified dynamically via the Query interface
 */
//protected abstract LockOptions[] getLockOptions(Map lockOptions);
protected abstract LockMode[] getLockModes(LockOptions lockOptions);
/**
 * Append FOR UPDATE OF clause, if necessary. This
 * empty superclass implementation merely returns its first
 * argument.
 *
 * @param sql the generated SQL; returned unchanged by this default implementation
 * @param parameters the query parameters (unused here; available to overrides)
 * @param dialect the SQL dialect (unused here; available to overrides)
 * @param afterLoadActions actions run after entity load (unused here; available to overrides)
 * @return the (possibly lock-augmented) SQL; here always {@code sql} itself
 */
protected String applyLocks(
		String sql,
		QueryParameters parameters,
		Dialect dialect,
		List afterLoadActions) throws HibernateException {
	return sql;
}

/**
 * Does this query return objects that might be already cached
 * by the session, whose lock mode may need upgrading?
 *
 * @return {@code false} by default; subclasses override when lock upgrades may apply
 */
protected boolean upgradeLocks() {
	return false;
}

/**
 * Return false if this loader is a batch entity loader.
 *
 * @return {@code false} by default; single-row loaders override to return {@code true}
 */
protected boolean isSingleRowLoader() {
	return false;
}

/**
 * Get the SQL table aliases of entities whose
 * associations are subselect-loadable, returning
 * null if this loader does not support subselect
 * loading
 */
protected String[] getAliases() {
	return null;
}
/**
 * Modify the SQL before execution: apply lock clauses, then dialect-specific
 * hints/comments, then any DISTINCT-keyword processing.
 */
protected String preprocessSQL(
		String sql,
		QueryParameters parameters,
		SessionFactoryImplementor sessionFactory,
		List afterLoadActions) throws HibernateException {
	final Dialect dialect = sessionFactory.getServiceRegistry()
			.getService( JdbcServices.class )
			.getDialect();
	final String lockedSql = applyLocks( sql, parameters, dialect, afterLoadActions );
	final String hintedSql = dialect.addSqlHintOrComment(
			lockedSql,
			parameters,
			sessionFactory.getSessionFactoryOptions().isCommentsEnabled()
	);
	return processDistinctKeyword( hintedSql, parameters );
}
/**
 * Decide whether follow-on locking should be used: either the user explicitly
 * enabled it, or left it unset and the dialect requires it. When enabled, the
 * statement-level lock options are cleared and a per-entity lock action is
 * queued to run after each entity loads.
 *
 * @return {@code true} when follow-on locking was set up (and the query's
 *         lock options were reset), {@code false} otherwise
 */
protected boolean shouldUseFollowOnLocking(
		QueryParameters parameters,
		Dialect dialect,
		List afterLoadActions) {
	if ( ( parameters.getLockOptions().getFollowOnLocking() == null && dialect.useFollowOnLocking( parameters ) ) ||
			( parameters.getLockOptions().getFollowOnLocking() != null && parameters.getLockOptions().getFollowOnLocking() ) ) {
		// currently only one lock mode is allowed in follow-on locking
		final LockMode lockMode = determineFollowOnLockMode( parameters.getLockOptions() );
		final LockOptions lockOptions = new LockOptions( lockMode );
		if ( lockOptions.getLockMode() != LockMode.UPGRADE_SKIPLOCKED ) {
			if ( lockOptions.getLockMode() != LockMode.NONE ) {
				LOG.usingFollowOnLocking();
			}
			lockOptions.setTimeOut( parameters.getLockOptions().getTimeOut() );
			lockOptions.setScope( parameters.getLockOptions().getScope() );
			// Each loaded entity is locked individually once materialized.
			afterLoadActions.add(
					new AfterLoadAction() {
						@Override
						public void afterLoad(SharedSessionContractImplementor session, Object entity, Loadable persister) {
							( (Session) session ).buildLockRequest( lockOptions ).lock(
									persister.getEntityName(),
									entity
							);
						}
					}
			);
			// The SQL statement itself must no longer lock; clear the query's options.
			parameters.setLockOptions( new LockOptions() );
			return true;
		}
	}
	return false;
}
/**
 * Picks the single lock mode used for follow-on locking: the greatest mode
 * present in the given options. When alias-specific modes exist and are not
 * all {@code NONE}, a warning is logged because they collapse into one mode.
 */
protected LockMode determineFollowOnLockMode(LockOptions lockOptions) {
	final LockMode greatest = lockOptions.findGreatestLockMode();
	final boolean allNone =
			lockOptions.getLockMode() == LockMode.NONE && greatest == LockMode.NONE;
	if ( lockOptions.hasAliasSpecificLockModes() && !allNone ) {
		// Follow-on locking supports only one mode; alias-specific modes are lost.
		LOG.aliasSpecificLockingWithFollowOnLocking( greatest );
	}
	return greatest;
}
/**
 * Execute an SQL query and attempt to instantiate instances of the class mapped by the given
 * persister from each row of the ResultSet. If an object is supplied, will attempt to
 * initialize that object. If a collection is supplied, attempt to initialize that collection.
 */
public List doQueryAndInitializeNonLazyCollections(
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies) throws HibernateException, SQLException {
	// Delegate to the four-argument variant with no forced result transformer.
	final ResultTransformer noForcedTransformer = null;
	return doQueryAndInitializeNonLazyCollections( session, queryParameters, returnProxies, noForcedTransformer );
}
/**
 * Executes the query, hydrates results, and initializes any non-lazy
 * collections, temporarily forcing the persistence context's default
 * read-only setting to match the query's explicit setting (if any).
 *
 * @param forcedResultTransformer optional transformer overriding the one
 *        carried by the query parameters; may be {@code null}
 */
public List doQueryAndInitializeNonLazyCollections(
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies,
		final ResultTransformer forcedResultTransformer)
		throws HibernateException, SQLException {
	final PersistenceContext persistenceContext = session.getPersistenceContext();
	boolean defaultReadOnlyOrig = persistenceContext.isDefaultReadOnly();
	if ( queryParameters.isReadOnlyInitialized() ) {
		// The read-only/modifiable mode for the query was explicitly set.
		// Temporarily set the default read-only/modifiable setting to the query's setting.
		persistenceContext.setDefaultReadOnly( queryParameters.isReadOnly() );
	}
	else {
		// The read-only/modifiable setting for the query was not initialized.
		// Use the default read-only/modifiable from the persistence context instead.
		queryParameters.setReadOnly( persistenceContext.isDefaultReadOnly() );
	}
	persistenceContext.beforeLoad();
	List result;
	try {
		try {
			result = doQuery( session, queryParameters, returnProxies, forcedResultTransformer );
		}
		finally {
			// Always balance beforeLoad(), even when the query fails.
			persistenceContext.afterLoad();
		}
		// Non-lazy collections are initialized only after a successful load.
		persistenceContext.initializeNonLazyCollections();
	}
	finally {
		// Restore the original default
		persistenceContext.setDefaultReadOnly( defaultReadOnlyOrig );
	}
	return result;
}
/**
 * Loads a single row from the result set. This is the processing used from the
 * ScrollableResults where no collection fetches were encountered.
 *
 * @param resultSet The result set from which to do the load.
 * @param session The session from which the request originated.
 * @param queryParameters The query parameters specified by the user.
 * @param returnProxies Should proxies be generated
 *
 * @return The loaded "row".
 *
 * @throws HibernateException on errors reading or hydrating the row
 */
public Object loadSingleRow(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies) throws HibernateException {
	final int entitySpan = getEntityPersisters().length;
	// With no entities in the projection there is nothing to two-phase load.
	final List hydratedObjects = entitySpan == 0 ?
			null : new ArrayList( entitySpan );
	final Object result;
	try {
		result = getRowFromResultSet(
				resultSet,
				session,
				queryParameters,
				getLockModes( queryParameters.getLockOptions() ),
				null,
				hydratedObjects,
				new EntityKey[entitySpan],
				returnProxies
		);
	}
	catch (SQLException sqle) {
		// Convert to Hibernate's JDBC exception hierarchy, keeping the SQL for context.
		throw factory.getJdbcServices().getSqlExceptionHelper().convert(
				sqle,
				"could not read next row of results",
				getSQLString()
		);
	}
	// Finish the two-phase load of hydrated entities and fetched collections.
	initializeEntitiesAndCollections(
			hydratedObjects,
			resultSet,
			session,
			queryParameters.isReadOnly( session )
	);
	session.getPersistenceContextInternal().initializeNonLazyCollections();
	return result;
}
/**
 * Sequentially reads all physical rows belonging to the logical row identified
 * by {@code keyToRead}, hydrating each, and returns the first loaded object.
 * Used by scrollable-results processing when collection fetches span rows.
 *
 * @param resultSet the result set, positioned at the first physical row of the logical row
 * @param session the originating session
 * @param queryParameters the user-specified query parameters
 * @param returnProxies whether existing proxies should replace loaded entities
 * @param keyToRead the entity key every consumed row is expected to carry
 * @return the loaded "row" object
 * @throws AssertionFailure if a consumed row carries a different key than expected
 */
private Object sequentialLoad(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies,
		final EntityKey keyToRead) throws HibernateException {
	final int entitySpan = getEntityPersisters().length;
	final List hydratedObjects = entitySpan == 0 ?
			null : new ArrayList( entitySpan );
	Object result = null;
	final EntityKey[] loadedKeys = new EntityKey[entitySpan];
	try {
		do {
			Object loaded = getRowFromResultSet(
					resultSet,
					session,
					queryParameters,
					getLockModes( queryParameters.getLockOptions() ),
					null,
					hydratedObjects,
					loadedKeys,
					returnProxies
			);
			if ( !keyToRead.equals( loadedKeys[0] ) ) {
				throw new AssertionFailure(
						String.format(
								"Unexpected key read for row; expected [%s]; actual [%s]",
								keyToRead,
								loadedKeys[0]
						)
				);
			}
			// Only the first physical row's object is the logical result.
			if ( result == null ) {
				result = loaded;
			}
		}
		while ( resultSet.next() &&
				isCurrentRowForSameEntity( keyToRead, 0, resultSet, session ) );
	}
	catch (SQLException sqle) {
		throw factory.getJdbcServices().getSqlExceptionHelper().convert(
				sqle,
				// was "could not doAfterTransactionCompletion sequential read of results (forward)" —
				// an artifact of a bad search/replace; restored to match loadSequentialRowsForward
				"could not perform sequential read of results (forward)",
				getSQLString()
		);
	}
	initializeEntitiesAndCollections(
			hydratedObjects,
			resultSet,
			session,
			queryParameters.isReadOnly( session )
	);
	session.getPersistenceContextInternal().initializeNonLazyCollections();
	return result;
}
/**
 * Checks whether the result set's current row belongs to the same entity
 * identified by {@code keyToRead}, judged by the persister at the given index.
 */
private boolean isCurrentRowForSameEntity(
		final EntityKey keyToRead,
		final int persisterIndex,
		final ResultSet resultSet,
		final SharedSessionContractImplementor session) throws SQLException {
	final EntityKey rowKey = getKeyFromResultSet(
			persisterIndex,
			getEntityPersisters()[persisterIndex],
			null,
			resultSet,
			session
	);
	return keyToRead.equals( rowKey );
}
/**
 * Loads a single logical row from the result set moving forward. This is the
 * processing used from the ScrollableResults where there were collection fetches
 * encountered; thus a single logical row may have multiple rows in the underlying
 * result set.
 *
 * @param resultSet The result set from which to do the load.
 * @param session The session from which the request originated.
 * @param queryParameters The query parameters specified by the user.
 * @param returnProxies Should proxies be generated
 *
 * @return The loaded "row", or {@code null} when the cursor is already past the end.
 *
 * @throws HibernateException on errors reading the result set
 */
public Object loadSequentialRowsForward(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies) throws HibernateException {
	// note that for sequential scrolling, we make the assumption that
	// the first persister element is the "root entity"
	try {
		if ( resultSet.isAfterLast() ) {
			// don't even bother trying to read further
			return null;
		}
		if ( resultSet.isBeforeFirst() ) {
			resultSet.next();
		}
		// We call getKeyFromResultSet() here so that we can know the
		// key value upon which to perform the breaking logic. However,
		// it is also then called from getRowFromResultSet() which is certainly
		// not the most efficient. But the call here is needed, and there
		// currently is no other way without refactoring of the doQuery()/getRowFromResultSet()
		// methods
		final EntityKey currentKey = getKeyFromResultSet(
				0,
				getEntityPersisters()[0],
				null,
				resultSet,
				session
		);
		return sequentialLoad( resultSet, session, queryParameters, returnProxies, currentKey );
	}
	catch (SQLException sqle) {
		throw factory.getJdbcServices().getSqlExceptionHelper().convert(
				sqle,
				"could not perform sequential read of results (forward)",
				getSQLString()
		);
	}
}
/**
 * Loads a single logical row from the result set moving backward (in reverse).
 * This is the processing used from the ScrollableResults where there were
 * collection fetches encountered; thus a single logical row may have multiple
 * rows in the underlying result set.
 *
 * @param resultSet The result set from which to do the load.
 * @param session The session from which the request originated.
 * @param queryParameters The query parameters specified by the user.
 * @param returnProxies Should proxies be generated
 * @param isLogicallyAfterLast Is the cursor logically positioned after the last row?
 *
 * @return The loaded "row", or {@code null} when already at the first row.
 *
 * @throws HibernateException on errors reading the result set
 */
public Object loadSequentialRowsReverse(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies,
		final boolean isLogicallyAfterLast) throws HibernateException {
	// note that for sequential scrolling, we make the assumption that
	// the first persister element is the "root entity"
	try {
		if ( resultSet.isFirst() ) {
			// don't even bother trying to read any further
			return null;
		}
		EntityKey keyToRead = null;
		// This check is needed since processing leaves the cursor
		// after the last physical row for the current logical row;
		// thus if we are after the last physical row, this might be
		// caused by either:
		// 1) scrolling to the last logical row
		// 2) scrolling past the last logical row
		// In the latter scenario, the previous logical row
		// really is the last logical row.
		//
		// In all other cases, we should process back two
		// logical records (the current logic row, plus the
		// previous logical row).
		if ( resultSet.isAfterLast() && isLogicallyAfterLast ) {
			// position cursor to the last row
			resultSet.last();
			keyToRead = getKeyFromResultSet(
					0,
					getEntityPersisters()[0],
					null,
					resultSet,
					session
			);
		}
		else {
			// Since the result set cursor is always left at the first
			// physical row after the "last processed", we need to jump
			// back one position to get the key value we are interested
			// in skipping
			resultSet.previous();
			// sequentially read the result set in reverse until we recognize
			// a change in the key value. At that point, we are pointed at
			// the last physical sequential row for the logical row in which
			// we are interested in processing
			boolean firstPass = true;
			final EntityKey lastKey = getKeyFromResultSet(
					0,
					getEntityPersisters()[0],
					null,
					resultSet,
					session
			);
			while ( resultSet.previous() ) {
				EntityKey checkKey = getKeyFromResultSet(
						0,
						getEntityPersisters()[0],
						null,
						resultSet,
						session
				);
				if ( firstPass ) {
					firstPass = false;
					keyToRead = checkKey;
				}
				if ( !lastKey.equals( checkKey ) ) {
					break;
				}
			}
		}
		// Read backwards until we read past the first physical sequential
		// row with the key we are interested in loading
		while ( resultSet.previous() ) {
			EntityKey checkKey = getKeyFromResultSet(
					0,
					getEntityPersisters()[0],
					null,
					resultSet,
					session
			);
			if ( !keyToRead.equals( checkKey ) ) {
				break;
			}
		}
		// Finally, read ahead one row to position result set cursor
		// at the first physical row we are interested in loading
		resultSet.next();
		// and perform the load
		return sequentialLoad( resultSet, session, queryParameters, returnProxies, keyToRead );
	}
	catch (SQLException sqle) {
		throw factory.getJdbcServices().getSqlExceptionHelper().convert(
				sqle,
				// previously read "could not doAfterTransactionCompletion sequential read of
				// results (forward)" — garbled by a search/replace and copy-pasted from the
				// forward method; corrected to describe this reverse path
				"could not perform sequential read of results (reverse)",
				getSQLString()
		);
	}
}
/**
 * Builds the entity key of the "optional object" carried by the query
 * parameters, or returns {@code null} when no optional object/entity-name
 * pair is present.
 */
protected static EntityKey getOptionalObjectKey(QueryParameters queryParameters, SharedSessionContractImplementor session) {
	final Object optionalObject = queryParameters.getOptionalObject();
	final String optionalEntityName = queryParameters.getOptionalEntityName();
	if ( optionalObject == null || optionalEntityName == null ) {
		return null;
	}
	final EntityPersister persister = session.getEntityPersister( optionalEntityName, optionalObject );
	return session.generateEntityKey( queryParameters.getOptionalId(), persister );
}
/**
 * Convenience overload that reads the current row without a forced result
 * transformer (delegates to the nine-argument variant with {@code null}).
 */
private Object getRowFromResultSet(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final LockMode[] lockModesArray,
		final EntityKey optionalObjectKey,
		final List hydratedObjects,
		final EntityKey[] keys,
		boolean returnProxies) throws SQLException, HibernateException {
	final ResultTransformer noForcedTransformer = null;
	return getRowFromResultSet(
			resultSet,
			session,
			queryParameters,
			lockModesArray,
			optionalObjectKey,
			hydratedObjects,
			keys,
			returnProxies,
			noForcedTransformer
	);
}
/**
 * Reads one physical row: extracts entity keys, registers missing one-to-ones,
 * materializes the row's entities (side-effecting the persistence context),
 * reads any collection elements, optionally substitutes existing proxies,
 * applies post-load locks, and finally shapes the result via either the
 * forced transformer or this loader's own row/column logic.
 */
private Object getRowFromResultSet(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final LockMode[] lockModesArray,
		final EntityKey optionalObjectKey,
		final List hydratedObjects,
		final EntityKey[] keys,
		boolean returnProxies,
		ResultTransformer forcedResultTransformer) throws SQLException, HibernateException {
	final Loadable[] persisters = getEntityPersisters();
	final int entitySpan = persisters.length;
	extractKeysFromResultSet(
			persisters,
			queryParameters,
			resultSet,
			session,
			keys,
			lockModesArray,
			hydratedObjects
	);
	// Null keys where an association was expected are recorded as "not exists".
	registerNonExists( keys, persisters, session );
	// this call is side-effecty
	Object[] row = getRow(
			resultSet,
			persisters,
			keys,
			queryParameters.getOptionalObject(),
			optionalObjectKey,
			lockModesArray,
			hydratedObjects,
			session
	);
	readCollectionElements( row, resultSet, session );
	if ( returnProxies ) {
		// now get an existing proxy for each row element (if there is one)
		final PersistenceContext persistenceContext = session.getPersistenceContextInternal();
		for ( int i = 0; i < entitySpan; i++ ) {
			Object entity = row[i];
			Object proxy = persistenceContext.proxyFor( persisters[i], keys[i], entity );
			if ( entity != proxy ) {
				// force the proxy to resolve itself
				( (HibernateProxy) proxy ).getHibernateLazyInitializer().setImplementation( entity );
				row[i] = proxy;
			}
		}
	}
	applyPostLoadLocks( row, lockModesArray, session );
	return forcedResultTransformer == null
			? getResultColumnOrRow( row, queryParameters.getResultTransformer(), resultSet, session )
			: forcedResultTransformer.transformTuple(
					getResultRow( row, resultSet, session ),
					getResultRowAliases()
			)
			;
}
/**
 * Hydrates and resolves the identifier of each entity in the current result-set
 * row, populating {@code keys}. When this is a single-row loader with a known
 * optional id, the last persister's key is built directly from that id and
 * skipped during hydration. Composite identifiers containing key-many-to-one
 * associations may force eager resolution (and even loading) of their target
 * entities here.
 *
 * @param persisters the row's entity persisters
 * @param queryParameters the user-specified query parameters
 * @param resultSet the result set positioned at the row to read
 * @param session the originating session
 * @param keys out parameter: one EntityKey (or null) per persister
 * @param lockModes the per-persister lock modes
 * @param hydratedObjects accumulator for instances needing two-phase load
 */
protected void extractKeysFromResultSet(
		Loadable[] persisters,
		QueryParameters queryParameters,
		ResultSet resultSet,
		SharedSessionContractImplementor session,
		EntityKey[] keys,
		LockMode[] lockModes,
		List hydratedObjects) throws SQLException {
	final int entitySpan = persisters.length;
	final int numberOfPersistersToProcess;
	final Serializable optionalId = queryParameters.getOptionalId();
	if ( isSingleRowLoader() && optionalId != null ) {
		keys[entitySpan - 1] = session.generateEntityKey( optionalId, persisters[entitySpan - 1] );
		// skip the last persister below...
		numberOfPersistersToProcess = entitySpan - 1;
	}
	else {
		numberOfPersistersToProcess = entitySpan;
	}
	// First pass: hydrate the raw identifier state for every persister.
	final Object[] hydratedKeyState = new Object[numberOfPersistersToProcess];
	for ( int i = 0; i < numberOfPersistersToProcess; i++ ) {
		final Type idType = persisters[i].getIdentifierType();
		hydratedKeyState[i] = idType.hydrate(
				resultSet,
				getEntityAliases()[i].getSuffixedKeyAliases(),
				session,
				null
		);
	}
	// Second pass: resolve ids, forcing resolution of key-many-to-one targets first.
	for ( int i = 0; i < numberOfPersistersToProcess; i++ ) {
		final Type idType = persisters[i].getIdentifierType();
		if ( idType.isComponentType() && getCompositeKeyManyToOneTargetIndices() != null ) {
			// we may need to force resolve any key-many-to-one(s)
			int[] keyManyToOneTargetIndices = getCompositeKeyManyToOneTargetIndices()[i];
			// todo : better solution is to order the index processing based on target indices
			//		that would account for multiple levels whereas this scheme does not
			if ( keyManyToOneTargetIndices != null ) {
				for ( int targetIndex : keyManyToOneTargetIndices ) {
					if ( targetIndex < numberOfPersistersToProcess ) {
						final Type targetIdType = persisters[targetIndex].getIdentifierType();
						final Serializable targetId = (Serializable) targetIdType.resolve(
								hydratedKeyState[targetIndex],
								session,
								null
						);
						// todo : need a way to signal that this key is resolved and its data resolved
						keys[targetIndex] = session.generateEntityKey( targetId, persisters[targetIndex] );
					}
					// this part copied from #getRow, this section could be refactored out
					// NOTE(review): this lookup sits outside the bounds check above, so when
					// targetIndex >= numberOfPersistersToProcess it relies on keys[targetIndex]
					// having been pre-populated from the optional id — confirm this is intended.
					Object object = session.getEntityUsingInterceptor( keys[targetIndex] );
					if ( object != null ) {
						//its already loaded so don't need to hydrate it
						instanceAlreadyLoaded(
								resultSet,
								targetIndex,
								persisters[targetIndex],
								keys[targetIndex],
								object,
								lockModes[targetIndex],
								hydratedObjects,
								session
						);
					}
					else {
						instanceNotYetLoaded(
								resultSet,
								targetIndex,
								persisters[targetIndex],
								getEntityAliases()[targetIndex].getRowIdAlias(),
								keys[targetIndex],
								lockModes[targetIndex],
								getOptionalObjectKey( queryParameters, session ),
								queryParameters.getOptionalObject(),
								hydratedObjects,
								session
						);
					}
				}
			}
		}
		// If hydratedKeyState[i] is null, then we know the association should be null.
		// Don't bother resolving the ID if hydratedKeyState[i] is null.
		// Implementation note: if the ID is a composite ID, then resolving a null value will
		// result in instantiating an empty composite if AvailableSettings#CREATE_EMPTY_COMPOSITES_ENABLED
		// is true. By not resolving a null value for a composite ID, we avoid the overhead of instantiating
		// an empty composite, checking if it is equivalent to null (it should be), then ultimately throwing
		// out the empty value.
		final Serializable resolvedId;
		if ( hydratedKeyState[i] != null ) {
			resolvedId = (Serializable) idType.resolve( hydratedKeyState[i], session, null );
		}
		else {
			resolvedId = null;
		}
		keys[i] = resolvedId == null ? null : session.generateEntityKey( resolvedId, persisters[i] );
	}
}
/**
 * Hook invoked after a row's entities have been materialized, allowing
 * subclasses to apply lock modes to them. This base implementation is a no-op.
 */
protected void applyPostLoadLocks(Object[] row, LockMode[] lockModesArray, SharedSessionContractImplementor session) {
}
/**
 * Read any collection elements contained in a single row of the result set.
 * For each collection persister, determines the owning instance from the row
 * (or defers to the session) and feeds the row into that collection.
 */
private void readCollectionElements(Object[] row, ResultSet resultSet, SharedSessionContractImplementor session)
		throws SQLException, HibernateException {
	//TODO: make this handle multiple collection roles!
	final CollectionPersister[] collectionPersisters = getCollectionPersisters();
	if ( collectionPersisters != null ) {
		final CollectionAliases[] descriptors = getCollectionAliases();
		final int[] collectionOwners = getCollectionOwners();
		for ( int i = 0; i < collectionPersisters.length; i++ ) {
			final boolean hasCollectionOwners = collectionOwners != null &&
					collectionOwners[i] > -1;
			//true if this is a query and we are loading multiple instances of the same collection role
			//otherwise this is a CollectionInitializer and we are loading up a single collection or batch
			final Object owner = hasCollectionOwners ?
					row[collectionOwners[i]] :
					null; //if null, owner will be retrieved from session
			final CollectionPersister collectionPersister = collectionPersisters[i];
			final Serializable key;
			if ( owner == null ) {
				key = null;
			}
			else {
				key = collectionPersister.getCollectionType().getKeyOfOwner( owner, session );
				//TODO: old version did not require hashmap lookup:
				//keys[collectionOwner].getIdentifier()
			}
			readCollectionElement(
					owner,
					key,
					collectionPersister,
					descriptors[i],
					resultSet,
					session
			);
		}
	}
}
/**
 * Executes the query statement and processes the entire result set,
 * always releasing the JDBC statement afterwards.
 */
private List doQuery(
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies,
		final ResultTransformer forcedResultTransformer) throws SQLException, HibernateException {
	final RowSelection selection = queryParameters.getRowSelection();
	// Effective row cap: the user-specified max, or effectively unbounded.
	final int maxRows = LimitHelper.hasMaxRows( selection ) ?
			selection.getMaxRows() :
			Integer.MAX_VALUE;
	final List afterLoadActions = new ArrayList();
	final SqlStatementWrapper wrapper = executeQueryStatement( queryParameters, false, afterLoadActions, session );
	final ResultSet rs = wrapper.getResultSet();
	final Statement st = wrapper.getStatement();
	// would be great to move all this below here into another method that could also be used
	// from the new scrolling stuff.
	//
	// Would need to change the way the max-row stuff is handled (i.e. behind an interface) so
	// that I could do the control breaking at the means to know when to stop
	try {
		return processResultSet(
				rs,
				queryParameters,
				session,
				returnProxies,
				forcedResultTransformer,
				maxRows,
				afterLoadActions
		);
	}
	finally {
		// Release the statement and let the coordinator do its post-execution bookkeeping,
		// even when result processing throws.
		final JdbcCoordinator jdbcCoordinator = session.getJdbcCoordinator();
		jdbcCoordinator.getLogicalConnection().getResourceRegistry().release( st );
		jdbcCoordinator.afterStatementExecution();
	}
}
protected List processResultSet(
ResultSet rs,
QueryParameters queryParameters,
SharedSessionContractImplementor session,
boolean returnProxies,
ResultTransformer forcedResultTransformer,
int maxRows,
List afterLoadActions) throws SQLException {
final int entitySpan = getEntityPersisters().length;
final boolean createSubselects = isSubselectLoadingEnabled();
final List subselectResultKeys = createSubselects ? new ArrayList<>() : null;
final List