org.datanucleus.store.rdbms.query.ScrollableQueryResult Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of datanucleus-rdbms Show documentation
Plugin for DataNucleus providing persistence to RDBMS datastores.
The newest version!
/**********************************************************************
Copyright (c) 2005 Erik Bengtson and others. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
2005 Andy Jefferson - added support for bidirectional iterator
2008 Andy Jefferson - added resultCacheType. Removed optimistic restriction
2008 Marco Schulze - implemented toArray() and toArray(Object[])
...
**********************************************************************/
package org.datanucleus.store.rdbms.query;
import java.io.ObjectStreamException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;

import org.datanucleus.ExecutionContext;
import org.datanucleus.FetchPlan;
import org.datanucleus.exceptions.NucleusDataStoreException;
import org.datanucleus.exceptions.NucleusException;
import org.datanucleus.exceptions.NucleusUserException;
import org.datanucleus.metadata.AbstractMemberMetaData;
import org.datanucleus.state.DNStateManager;
import org.datanucleus.store.query.AbstractQueryResultIterator;
import org.datanucleus.store.query.Query;
import org.datanucleus.store.rdbms.JDBCUtils;
import org.datanucleus.store.types.SCOUtils;
import org.datanucleus.util.ConcurrentReferenceHashMap;
import org.datanucleus.util.ConcurrentReferenceHashMap.ReferenceType;
import org.datanucleus.util.Localiser;
import org.datanucleus.util.NucleusLogger;
/**
* Lazy collection results from a Query with the ResultSet scrollable.
* Supports the following query extensions (in addition to those supported by superclasses) :-
*
* - datanucleus.query.resultCacheType Type of caching of result objects.
* Supports strong, weak, soft, none
*
* If there is no transaction present, or if the FetchPlan is in "greedy" mode, and where caching is being used
* will load all results at startup. Otherwise results are only loaded when accessed.
*/
public final class ScrollableQueryResult extends AbstractRDBMSQueryResult implements java.io.Serializable
{
/** Map of ResultSet object values, keyed by the list index ("0", "1", etc). */
private Map resultsObjsByIndex = null;
/** Map of persistable object id, keyed by the result position (0, 1, 2, ...). */
protected Map resultIdsByPosition = null;
/** Position of first result (origin=0). */
int startIndex = 0;
/** Position of last result (origin=0, set when known). */
int endIndex = -1;
/**
* Constructor of the result from a Query.
* @param query The Query
* @param rof The factory to retrieve results from
* @param rs The ResultSet from the Query Statement
* @param fp FetchPlan
* @param candidates the Candidates collection. Pass this argument only when distinct = false
*/
public ScrollableQueryResult(Query query, ResultObjectFactory rof, ResultSet rs, FetchPlan fp, Collection candidates)
{
super(query, rof, rs, fp);
if (candidates != null)
{
//TODO support this feature
throw new NucleusException("Unsupported Feature: Candidate Collection is only allowed using ForwardQueryResult").setFatal();
}
if (query.useResultsCaching() && rof instanceof PersistentClassROF) // Only cache result ids when performing a candidate query
{
resultIdsByPosition = new HashMap<>();
}
// Process any supported extensions
String ext = (String)query.getExtension(Query.EXTENSION_RESULT_CACHE_TYPE);
if (ext != null)
{
ext = ext.toLowerCase();
if (ext.equals("soft"))
{
resultsObjsByIndex = new ConcurrentReferenceHashMap<>(1, ReferenceType.STRONG, ReferenceType.SOFT);
}
else if (ext.equals("weak"))
{
resultsObjsByIndex = new ConcurrentReferenceHashMap<>(1, ReferenceType.STRONG, ReferenceType.WEAK);
}
else if (ext.equals("strong"))
{
resultsObjsByIndex = new HashMap<>();
}
else if (ext.equals("none"))
{
resultsObjsByIndex = null;
}
else
{
resultsObjsByIndex = new ConcurrentReferenceHashMap<>(1, ReferenceType.STRONG, ReferenceType.WEAK);
}
}
else
{
resultsObjsByIndex = new ConcurrentReferenceHashMap<>(1, ReferenceType.STRONG, ReferenceType.WEAK);
}
if (applyRangeChecks)
{
startIndex = (int) query.getRangeFromIncl();
}
}
public void initialise()
{
if (resultsObjsByIndex != null)
{
// Caching results so load up any result objects needed right now
int fetchSize = query.getFetchPlan().getFetchSize();
if (fetchSize == FetchPlan.FETCH_SIZE_GREEDY)
{
// "greedy" mode, so load all results now
loadObjects(startIndex, -1);
// Cache the query results
cacheQueryResults();
}
else if (fetchSize > 0)
{
// Load up the first "fetchSize" objects now
loadObjects(startIndex, fetchSize);
}
}
}
/**
* Convenience method to load up rows starting at the specified position.
* Optionally takes a maximum number of rows to process.
* @param start Start row
* @param maxNumber Max number to process (-1 means no maximum)
*/
protected void loadObjects(int start, int maxNumber)
{
int index = start;
boolean hasMoreResults = true;
while (hasMoreResults)
{
if (maxNumber >= 0 && index == (maxNumber+start))
{
// Maximum specified, and already loaded the required number of results
hasMoreResults = false;
}
else if (applyRangeChecks && index >= query.getRangeToExcl())
{
// Reached end of allowed range
size = (int) (query.getRangeToExcl()-query.getRangeFromIncl());
hasMoreResults = false;
}
else
{
try
{
boolean rowExists = rs.absolute(index+1);
if (!rowExists)
{
hasMoreResults = false;
size = index; // We know the size now so store for later use
if (applyRangeChecks && index < query.getRangeToExcl())
{
size = (int) (index - query.getRangeFromIncl());
}
endIndex = index-1;
}
else
{
getObjectForIndex(index);
index++;
}
}
catch (SQLException sqle)
{
// TODO Handle this
}
}
}
}
/**
* Accessor for the result object at an index.
* If the object has already been processed will return that object,
* otherwise will retrieve the object using the factory.
* @param index The list index position
* @return The result object
*/
protected E getObjectForIndex(int index)
{
if (resultsObjsByIndex != null)
{
// Caching objects, so check the cache for this index
E obj = resultsObjsByIndex.get(index);
if (obj != null)
{
// Already retrieved so return it
return obj;
}
}
if (rs == null)
{
throw new NucleusUserException("Results for query have already been closed. Perhaps you called flush(), closed the query, or ended a transaction");
}
try
{
// ResultSet is numbered 1, 2, ... N
// List is indexed 0, 1, 2, ... N-1
rs.absolute(index+1);
E obj = rof.getObject();
JDBCUtils.logWarnings(rs);
// Process any bulk loaded members
if (bulkLoadedValueByMemberNumber != null)
{
ExecutionContext ec = query.getExecutionContext();
Map memberValues = bulkLoadedValueByMemberNumber.get(api.getIdForObject(obj));
DNStateManager sm = ec.findStateManager(obj);
Collection bulkMmds = new HashSet<>(bulkLoadedMmds);
if (memberValues != null)
{
Iterator> memberValIter = memberValues.entrySet().iterator();
while (memberValIter.hasNext())
{
Map.Entry memberValueEntry = memberValIter.next();
sm.replaceField(memberValueEntry.getKey(), memberValueEntry.getValue());
bulkMmds.remove(sm.getClassMetaData().getMetaDataForManagedMemberAtAbsolutePosition(memberValueEntry.getKey()));
}
}
if (!bulkMmds.isEmpty())
{
for (AbstractMemberMetaData bulkMmd : bulkMmds)
{
if (bulkMmd.hasCollection())
{
try
{
Class> instanceType = SCOUtils.getContainerInstanceType(bulkMmd.getType(), bulkMmd.getOrderMetaData() != null);
Collection coll = (Collection