/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.collect.Iterables;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.ExitableDirectoryReader;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiPostingsEnum;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.*;
import org.apache.lucene.search.TotalHits.Relation;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.ObjectReleaseTracker;
import org.apache.solr.core.DirectoryFactory;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrInfoBean;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.metrics.SolrMetricProducer;
import org.apache.solr.metrics.SolrMetricsContext;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.facet.UnInvertedField;
import org.apache.solr.search.stats.StatsCache;
import org.apache.solr.search.stats.StatsSource;
import org.apache.solr.uninverting.UninvertingReader;
import org.apache.solr.update.IndexFingerprint;
import org.apache.solr.update.SolrIndexConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* SolrIndexSearcher adds schema awareness and caching functionality over {@link IndexSearcher}.
*
* @since solr 0.9
*/
public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrInfoBean, SolrMetricProducer {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String STATS_SOURCE = "org.apache.solr.stats_source";
public static final String STATISTICS_KEY = "searcher";
// These should *only* be used for debugging or monitoring purposes
public static final AtomicLong numOpens = new AtomicLong();
public static final AtomicLong numCloses = new AtomicLong();
@SuppressWarnings({"rawtypes"})
private static final Map NO_GENERIC_CACHES = Collections.emptyMap();
@SuppressWarnings({"rawtypes"})
private static final SolrCache[] NO_CACHES = new SolrCache[0];
private final SolrCore core;
private final IndexSchema schema;
private final SolrDocumentFetcher docFetcher;
private final String name;
private final Date openTime = new Date();
private final long openNanoTime = System.nanoTime();
private Date registerTime;
private long warmupTime = 0;
private final DirectoryReader reader;
private final boolean closeReader;
private final int queryResultWindowSize;
private final int queryResultMaxDocsCached;
private final boolean useFilterForSortedQuery;
private final boolean cachingEnabled;
private final SolrCache<Query, DocSet> filterCache;
private final SolrCache<QueryResultKey, DocList> queryResultCache;
private final SolrCache<String, UnInvertedField> fieldValueCache;
// map of generic caches - not synchronized since it's read-only after the constructor.
@SuppressWarnings({"rawtypes"})
private final Map<String, SolrCache> cacheMap;
// list of all caches associated with this searcher.
@SuppressWarnings({"rawtypes"})
private final SolrCache[] cacheList;
private DirectoryFactory directoryFactory;
private final LeafReader leafReader;
// only for addIndexes etc (no fieldcache)
private final DirectoryReader rawReader;
private final String path;
private boolean releaseDirectory;
private final StatsCache statsCache;
private Set<String> metricNames = ConcurrentHashMap.newKeySet();
private SolrMetricsContext solrMetricsContext;
private static DirectoryReader getReader(SolrCore core, SolrIndexConfig config, DirectoryFactory directoryFactory,
String path) throws IOException {
final Directory dir = directoryFactory.get(path, DirContext.DEFAULT, config.lockType);
try {
return core.getIndexReaderFactory().newReader(dir, core);
} catch (Exception e) {
directoryFactory.release(dir);
throw new SolrException(ErrorCode.SERVER_ERROR, "Error opening Reader", e);
}
}
// TODO: wrap elsewhere and return a "map" from the schema that overrides get() ?
// this reader supports reopen
private static DirectoryReader wrapReader(SolrCore core, DirectoryReader reader) throws IOException {
assert reader != null;
return ExitableDirectoryReader.wrap(
UninvertingReader.wrap(reader, core.getLatestSchema().getUninversionMapper()),
SolrQueryTimeoutImpl.getInstance());
}
/**
* Builds the necessary collector chain (via delegate wrapping) and executes the query against it. This method takes
* into consideration both the explicitly provided collector and postFilter as well as any needed collector wrappers
* for dealing with options specified in the QueryCommand.
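* <p>For reference, the wrapping applied below, from outermost to innermost (each layer only when its
* corresponding option is set), is: {@code postFilter -> TimeLimitingCollector -> EarlyTerminatingCollector
* -> EarlyTerminatingSortingCollector -> collector}.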
* @return The collector used for search
*/
private Collector buildAndRunCollectorChain(QueryResult qr, Query query, Collector collector, QueryCommand cmd,
DelegatingCollector postFilter) throws IOException {
EarlyTerminatingSortingCollector earlyTerminatingSortingCollector = null;
if (cmd.getSegmentTerminateEarly()) {
final Sort cmdSort = cmd.getSort();
final int cmdLen = cmd.getLen();
final Sort mergeSort = core.getSolrCoreState().getMergePolicySort();
if (cmdSort == null || cmdLen <= 0 || mergeSort == null ||
!EarlyTerminatingSortingCollector.canEarlyTerminate(cmdSort, mergeSort)) {
log.warn("unsupported combination: segmentTerminateEarly=true cmdSort={} cmdLen={} mergeSort={}", cmdSort, cmdLen, mergeSort);
} else {
collector = earlyTerminatingSortingCollector = new EarlyTerminatingSortingCollector(collector, cmdSort, cmd.getLen());
}
}
final boolean terminateEarly = cmd.getTerminateEarly();
if (terminateEarly) {
collector = new EarlyTerminatingCollector(collector, cmd.getLen());
}
final long timeAllowed = cmd.getTimeAllowed();
if (timeAllowed > 0) {
collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), timeAllowed);
}
if (postFilter != null) {
postFilter.setLastDelegate(collector);
collector = postFilter;
}
try {
super.search(query, collector);
} catch (TimeLimitingCollector.TimeExceededException | ExitableDirectoryReader.ExitingReaderException x) {
log.warn("Query: [{}]; {}", query, x.getMessage());
qr.setPartialResults(true);
} catch (EarlyTerminatingCollectorException etce) {
if (collector instanceof DelegatingCollector) {
((DelegatingCollector) collector).finish();
}
throw etce;
} finally {
if (earlyTerminatingSortingCollector != null) {
qr.setSegmentTerminatedEarly(earlyTerminatingSortingCollector.terminatedEarly());
}
}
if (collector instanceof DelegatingCollector) {
((DelegatingCollector) collector).finish();
}
return collector;
}
public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, SolrIndexConfig config, String name,
boolean enableCache, DirectoryFactory directoryFactory) throws IOException {
// We don't need to reserve the directory because we get it from the factory
this(core, path, schema, name, getReader(core, config, directoryFactory, path), true, enableCache, false,
directoryFactory);
// Release the directory at close.
this.releaseDirectory = true;
}
@SuppressWarnings({"unchecked", "rawtypes"})
public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, String name, DirectoryReader r,
boolean closeReader, boolean enableCache, boolean reserveDirectory, DirectoryFactory directoryFactory)
throws IOException {
super(wrapReader(core, r));
this.path = path;
this.directoryFactory = directoryFactory;
this.reader = (DirectoryReader) super.readerContext.reader();
this.rawReader = r;
this.leafReader = SlowCompositeReaderWrapper.wrap(this.reader);
this.core = core;
this.statsCache = core.createStatsCache();
this.schema = schema;
this.name = "Searcher@" + Integer.toHexString(hashCode()) + "[" + core.getName() + "]"
+ (name != null ? " " + name : "");
log.debug("Opening [{}]", this.name);
if (directoryFactory.searchersReserveCommitPoints()) {
// reserve commit point for life of searcher
// TODO: This may not be safe w/softCommit, see SOLR-13908
core.getDeletionPolicy().saveCommitPoint(reader.getIndexCommit().getGeneration());
}
if (reserveDirectory) {
// Keep the directory from being released while we use it.
directoryFactory.incRef(getIndexReader().directory());
// Make sure to release it when closing.
this.releaseDirectory = true;
}
this.closeReader = closeReader;
setSimilarity(schema.getSimilarity());
final SolrConfig solrConfig = core.getSolrConfig();
this.queryResultWindowSize = solrConfig.queryResultWindowSize;
this.queryResultMaxDocsCached = solrConfig.queryResultMaxDocsCached;
this.useFilterForSortedQuery = solrConfig.useFilterForSortedQuery;
this.docFetcher = new SolrDocumentFetcher(this, solrConfig, enableCache);
this.cachingEnabled = enableCache;
if (cachingEnabled) {
final ArrayList<SolrCache> clist = new ArrayList<>();
fieldValueCache = solrConfig.fieldValueCacheConfig == null ? null
: solrConfig.fieldValueCacheConfig.newInstance();
if (fieldValueCache != null) clist.add(fieldValueCache);
filterCache = solrConfig.filterCacheConfig == null ? null : solrConfig.filterCacheConfig.newInstance();
if (filterCache != null) clist.add(filterCache);
queryResultCache = solrConfig.queryResultCacheConfig == null ? null
: solrConfig.queryResultCacheConfig.newInstance();
if (queryResultCache != null) clist.add(queryResultCache);
SolrCache documentCache = docFetcher.getDocumentCache();
if (documentCache != null) clist.add(documentCache);
if (solrConfig.userCacheConfigs.isEmpty()) {
cacheMap = NO_GENERIC_CACHES;
} else {
cacheMap = new HashMap<>(solrConfig.userCacheConfigs.size());
for (Map.Entry<String, CacheConfig> e : solrConfig.userCacheConfigs.entrySet()) {
SolrCache cache = e.getValue().newInstance();
if (cache != null) {
cacheMap.put(cache.name(), cache);
clist.add(cache);
}
}
}
cacheList = clist.toArray(new SolrCache[clist.size()]);
} else {
this.filterCache = null;
this.queryResultCache = null;
this.fieldValueCache = null;
this.cacheMap = NO_GENERIC_CACHES;
this.cacheList = NO_CACHES;
}
// We already have our own filter cache
setQueryCache(null);
// do this at the end since an exception in the constructor means we won't close
numOpens.incrementAndGet();
assert ObjectReleaseTracker.track(this);
}
public SolrDocumentFetcher getDocFetcher() {
return docFetcher;
}
List<LeafReaderContext> getLeafContexts() {
return super.leafContexts;
}
public StatsCache getStatsCache() {
return statsCache;
}
public FieldInfos getFieldInfos() {
return leafReader.getFieldInfos();
}
/*
* Override these two methods to provide a way to use global collection stats.
*/
@Override
public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
final SolrRequestInfo reqInfo = SolrRequestInfo.getRequestInfo();
if (reqInfo != null) {
final StatsSource statsSrc = (StatsSource) reqInfo.getReq().getContext().get(STATS_SOURCE);
if (statsSrc != null) {
return statsSrc.termStatistics(this, term, docFreq, totalTermFreq);
}
}
return localTermStatistics(term, docFreq, totalTermFreq);
}
@Override
public CollectionStatistics collectionStatistics(String field) throws IOException {
final SolrRequestInfo reqInfo = SolrRequestInfo.getRequestInfo();
if (reqInfo != null) {
final StatsSource statsSrc = (StatsSource) reqInfo.getReq().getContext().get(STATS_SOURCE);
if (statsSrc != null) {
return statsSrc.collectionStatistics(this, field);
}
}
return localCollectionStatistics(field);
}
public TermStatistics localTermStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
return super.termStatistics(term, docFreq, totalTermFreq);
}
public CollectionStatistics localCollectionStatistics(String field) throws IOException {
// Could call super.collectionStatistics(field); but we can use a cached MultiTerms
assert field != null;
// SlowAtomicReader has a cache of MultiTerms
Terms terms = getSlowAtomicReader().terms(field);
if (terms == null) {
return null;
}
return new CollectionStatistics(field, reader.maxDoc(),
terms.getDocCount(), terms.getSumTotalTermFreq(), terms.getSumDocFreq());
}
public boolean isCachingEnabled() {
return cachingEnabled;
}
public String getPath() {
return path;
}
@Override
public String toString() {
return name + "{" + reader + "}";
}
public SolrCore getCore() {
return core;
}
public final int maxDoc() {
return reader.maxDoc();
}
public final int numDocs() {
return reader.numDocs();
}
public final int docFreq(Term term) throws IOException {
return reader.docFreq(term);
}
/**
* Not recommended unless there is a particular need, since this method relies on {@link SlowCompositeReaderWrapper} internally.
* Prefer {@link IndexSearcher#leafContexts} to get the sub-readers instead.
*/
public final LeafReader getSlowAtomicReader() {
return leafReader;
}
/** Raw reader (no fieldcaches etc). Useful for operations like addIndexes */
public final DirectoryReader getRawReader() {
return rawReader;
}
@Override
public final DirectoryReader getIndexReader() {
assert reader == super.getIndexReader();
return reader;
}
/**
* Register sub-objects such as caches and our own metrics
*/
public void register() {
final Map<String, SolrInfoBean> infoRegistry = core.getInfoRegistry();
// register self
infoRegistry.put(STATISTICS_KEY, this);
infoRegistry.put(name, this);
for (@SuppressWarnings({"rawtypes"})SolrCache cache : cacheList) {
cache.setState(SolrCache.State.LIVE);
infoRegistry.put(cache.name(), cache);
}
this.solrMetricsContext = core.getSolrMetricsContext().getChildContext(this);
for (@SuppressWarnings({"rawtypes"})SolrCache cache : cacheList) {
// XXX use the deprecated method for back-compat. remove in 9.0
cache.initializeMetrics(solrMetricsContext.metricManager,
solrMetricsContext.registry, solrMetricsContext.tag, SolrMetricManager.mkName(cache.name(), STATISTICS_KEY));
}
initializeMetrics(solrMetricsContext, STATISTICS_KEY);
registerTime = new Date();
}
/**
* Frees resources associated with this searcher.
*
* In particular, the underlying reader and any caches in use are closed.
*/
@Override
public void close() throws IOException {
if (log.isDebugEnabled()) {
if (cachingEnabled) {
final StringBuilder sb = new StringBuilder();
sb.append("Closing ").append(name);
for (@SuppressWarnings({"rawtypes"})SolrCache cache : cacheList) {
sb.append("\n\t");
sb.append(cache);
}
log.debug("{}", sb);
} else {
log.debug("Closing [{}]", name);
}
}
core.getInfoRegistry().remove(name);
// super.close();
// can't use super.close() since it just calls reader.close() and that may only be called once
// per reader (even if incRef() was previously called).
long cpg = reader.getIndexCommit().getGeneration();
try {
if (closeReader) rawReader.decRef();
} catch (Exception e) {
SolrException.log(log, "Problem dec ref'ing reader", e);
}
if (directoryFactory.searchersReserveCommitPoints()) {
core.getDeletionPolicy().releaseCommitPoint(cpg);
}
for (@SuppressWarnings({"rawtypes"})SolrCache cache : cacheList) {
try {
cache.close();
} catch (Exception e) {
SolrException.log(log, "Exception closing cache " + cache.name(), e);
}
}
if (releaseDirectory) {
directoryFactory.release(getIndexReader().directory());
}
// do this at the end so it only gets done if there are no exceptions
numCloses.incrementAndGet();
assert ObjectReleaseTracker.release(this);
}
/** Direct access to the IndexSchema for use with this searcher */
public IndexSchema getSchema() {
return schema;
}
/**
* Returns a collection of all field names the index reader knows about.
*/
public Iterable<String> getFieldNames() {
return Iterables.transform(getFieldInfos(), fieldInfo -> fieldInfo.name);
}
public SolrCache<Query, DocSet> getFilterCache() {
return filterCache;
}
//
// Set default regenerators on filter and query caches if they don't have any
//
public static void initRegenerators(SolrConfig solrConfig) {
if (solrConfig.fieldValueCacheConfig != null && solrConfig.fieldValueCacheConfig.getRegenerator() == null) {
solrConfig.fieldValueCacheConfig.setRegenerator(new CacheRegenerator() {
@Override
public boolean regenerateItem(SolrIndexSearcher newSearcher,
@SuppressWarnings({"rawtypes"})SolrCache newCache,
@SuppressWarnings({"rawtypes"})SolrCache oldCache,
Object oldKey, Object oldVal) throws IOException {
if (oldVal instanceof UnInvertedField) {
UnInvertedField.getUnInvertedField((String) oldKey, newSearcher);
}
return true;
}
});
}
if (solrConfig.filterCacheConfig != null && solrConfig.filterCacheConfig.getRegenerator() == null) {
solrConfig.filterCacheConfig.setRegenerator(new CacheRegenerator() {
@Override
public boolean regenerateItem(SolrIndexSearcher newSearcher
, @SuppressWarnings({"rawtypes"})SolrCache newCache
, @SuppressWarnings({"rawtypes"})SolrCache oldCache,
Object oldKey, Object oldVal) throws IOException {
newSearcher.cacheDocSet((Query) oldKey, null, false);
return true;
}
});
}
if (solrConfig.queryResultCacheConfig != null && solrConfig.queryResultCacheConfig.getRegenerator() == null) {
final int queryResultWindowSize = solrConfig.queryResultWindowSize;
solrConfig.queryResultCacheConfig.setRegenerator(new CacheRegenerator() {
@Override
@SuppressWarnings({"rawtypes"})
public boolean regenerateItem(SolrIndexSearcher newSearcher, SolrCache newCache, SolrCache oldCache,
Object oldKey, Object oldVal) throws IOException {
QueryResultKey key = (QueryResultKey) oldKey;
int nDocs = 1;
// request 1 doc and let caching round up to the next window size...
// unless the window size is <=1, in which case we will pick
// the minimum of the number of documents requested last time and
// a reasonable number such as 40.
// TODO: make more configurable later...
if (queryResultWindowSize <= 1) {
DocList oldList = (DocList) oldVal;
int oldnDocs = oldList.offset() + oldList.size();
// 40 has factors of 2,4,5,10,20
nDocs = Math.min(oldnDocs, 40);
}
int flags = NO_CHECK_QCACHE | key.nc_flags;
QueryCommand qc = new QueryCommand();
qc.setQuery(key.query)
.setFilterList(key.filters)
.setSort(key.sort)
.setLen(nDocs)
.setSupersetMaxDoc(nDocs)
.setFlags(flags);
QueryResult qr = new QueryResult();
newSearcher.getDocListC(qr, qc);
return true;
}
});
}
}
public QueryResult search(QueryResult qr, QueryCommand cmd) throws IOException {
getDocListC(qr, cmd);
return qr;
}
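// A typical invocation, sketched (assumes "searcher", "q" and "sort" are in scope):
//   QueryCommand cmd = new QueryCommand();
//   cmd.setQuery(q).setSort(sort).setLen(10);
//   QueryResult res = searcher.search(new QueryResult(), cmd);
//   DocList docs = res.getDocList();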
// FIXME: This option has been dead/noop since 3.1, should we re-enable or remove it?
// public Hits search(Query query, Filter filter, Sort sort) throws IOException {
// // todo - when Solr starts accepting filters, need to
// // change this conditional check (filter!=null) and create a new filter
// // that ANDs them together if it already exists.
//
// if (optimizer==null || filter!=null || !(query instanceof BooleanQuery)
// ) {
// return super.search(query,filter,sort);
// } else {
// Query[] newQuery = new Query[1];
// Filter[] newFilter = new Filter[1];
// optimizer.optimize((BooleanQuery)query, this, 0, newQuery, newFilter);
//
// return super.search(newQuery[0], newFilter[0], sort);
// }
// }
/**
* Retrieve the {@link Document} instance corresponding to the document id.
*
* @see SolrDocumentFetcher
*/
@Override
public Document doc(int docId) throws IOException {
return doc(docId, (Set<String>) null);
}
/**
* Visit a document's fields using a {@link StoredFieldVisitor}.
* This method does not currently add to the Solr document cache.
*
* @see IndexReader#document(int, StoredFieldVisitor)
* @see SolrDocumentFetcher
*/
@Override
public final void doc(int docId, StoredFieldVisitor visitor) throws IOException {
getDocFetcher().doc(docId, visitor);
}
/**
* Retrieve the {@link Document} instance corresponding to the document id.
*
* NOTE: the document will have all fields accessible, but if a field filter is provided, only the provided
* fields will be loaded (the remainder will be available lazily).
*
* @see SolrDocumentFetcher
*/
@Override
public final Document doc(int i, Set<String> fields) throws IOException {
return getDocFetcher().doc(i, fields);
}
/** expert: internal API, subject to change */
public SolrCache<String, UnInvertedField> getFieldValueCache() {
return fieldValueCache;
}
/** Returns a weighted sort according to this searcher */
public Sort weightSort(Sort sort) throws IOException {
return (sort != null) ? sort.rewrite(this) : null;
}
/** Returns a weighted sort spec according to this searcher */
public SortSpec weightSortSpec(SortSpec originalSortSpec, Sort nullEquivalent) throws IOException {
return implWeightSortSpec(
originalSortSpec.getSort(),
originalSortSpec.getCount(),
originalSortSpec.getOffset(),
nullEquivalent);
}
/** Returns a weighted sort spec according to this searcher */
private SortSpec implWeightSortSpec(Sort originalSort, int num, int offset, Sort nullEquivalent) throws IOException {
Sort rewrittenSort = weightSort(originalSort);
if (rewrittenSort == null) {
rewrittenSort = nullEquivalent;
}
final SortField[] rewrittenSortFields = rewrittenSort.getSort();
final SchemaField[] rewrittenSchemaFields = new SchemaField[rewrittenSortFields.length];
for (int ii = 0; ii < rewrittenSortFields.length; ++ii) {
final String fieldName = rewrittenSortFields[ii].getField();
rewrittenSchemaFields[ii] = (fieldName == null ? null : schema.getFieldOrNull(fieldName));
}
return new SortSpec(rewrittenSort, rewrittenSchemaFields, num, offset);
}
/**
* Returns the first document number containing the term {@code t}, or -1 if no document was found.
* This method is primarily intended for clients that want to fetch documents using a unique identifier.
*
* @return the first document number containing the term, or -1 if not found
*/
public int getFirstMatch(Term t) throws IOException {
long pair = lookupId(t.field(), t.bytes());
if (pair == -1) {
return -1;
} else {
final int segIndex = (int) (pair >> 32);
final int segDocId = (int) pair;
return leafContexts.get(segIndex).docBase + segDocId;
}
}
/**
* lookup the docid by the unique key field, and return the id *within* the leaf reader in the low 32 bits, and the
* index of the leaf reader in the high 32 bits. -1 is returned if not found.
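* <p>A decode sketch, mirroring {@link #getFirstMatch(Term)} (assumes {@code searcher} and {@code idBytes}
* are in scope):
* <pre>{@code
* long pair = searcher.lookupId(idBytes);
* if (pair != -1) {
*   int segIndex = (int) (pair >> 32);  // index of the leaf reader
*   int segDocId = (int) pair;          // docid within that leaf
*   int globalId = searcher.getTopReaderContext().leaves().get(segIndex).docBase + segDocId;
* }
* }</pre>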
*
* @lucene.internal
*/
public long lookupId(BytesRef idBytes) throws IOException {
return lookupId(schema.getUniqueKeyField().getName(), idBytes);
}
private long lookupId(String field, BytesRef idBytes) throws IOException {
for (int i = 0, c = leafContexts.size(); i < c; i++) {
final LeafReaderContext leaf = leafContexts.get(i);
final LeafReader reader = leaf.reader();
final Terms terms = reader.terms(field);
if (terms == null) continue;
TermsEnum te = terms.iterator();
if (te.seekExact(idBytes)) {
PostingsEnum docs = te.postings(null, PostingsEnum.NONE);
docs = BitsFilteredPostingsEnum.wrap(docs, reader.getLiveDocs());
int id = docs.nextDoc();
if (id == DocIdSetIterator.NO_MORE_DOCS) continue;
assert docs.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
return (((long) i) << 32) | id;
}
}
return -1;
}
/**
* Compute and cache the DocSet that matches a query. The normal usage is expected to be
* {@code cacheDocSet(myQuery, null, false)}, meaning that Solr will determine if the Query warrants caching, and if
* so, will compute the DocSet that matches the Query and cache it. If the answer to the query is already cached,
* nothing further will be done.
*
* If the optionalAnswer DocSet is provided, it should *not* be modified after this call.
*
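* <p>A typical warming call, letting Solr decide whether the query warrants caching (a sketch):
* <pre>{@code
* searcher.cacheDocSet(myQuery, null, false);
* }</pre>
*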
* @param query
* the lucene query that will act as the key
* @param optionalAnswer
* the DocSet to be cached - if null, it will be computed.
* @param mustCache
* if true, a best effort will be made to cache this entry. if false, heuristics may be used to determine if
* it should be cached.
*/
public void cacheDocSet(Query query, DocSet optionalAnswer, boolean mustCache) throws IOException {
// Even if the cache is null, still compute the DocSet as it may serve to warm the Lucene
// or OS disk cache.
if (optionalAnswer != null) {
if (filterCache != null) {
filterCache.put(query, optionalAnswer);
}
return;
}
// Throw away the result, relying on the fact that getDocSet
// will currently always cache what it found. If getDocSet() starts
// using heuristics about what to cache, and mustCache==true, (or if we
// want this method to start using heuristics too) then
// this needs to change.
getDocSet(query);
}
private BitDocSet makeBitDocSet(DocSet answer) {
// TODO: this should be implemented in DocSet, most likely with a getBits method that takes a maxDoc argument
// or make DocSet instances remember maxDoc
if (answer instanceof BitDocSet) {
return (BitDocSet) answer;
}
FixedBitSet bs = new FixedBitSet(maxDoc());
DocIterator iter = answer.iterator();
while (iter.hasNext()) {
bs.set(iter.nextDoc());
}
return new BitDocSet(bs, answer.size());
}
public BitDocSet getDocSetBits(Query q) throws IOException {
DocSet answer = getDocSet(q);
BitDocSet answerBits = makeBitDocSet(answer);
if (answerBits != answer && filterCache != null) {
filterCache.put(q, answerBits);
}
return answerBits;
}
/**
* Returns the set of document ids matching a query. This method is cache-aware and attempts to retrieve the answer
* from the cache if possible. If the answer was not cached, it may have been inserted into the cache as a result of
* this call. This method can handle negative queries.
*
* The DocSet returned should not be modified.
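* <p>Usage sketch (assumes {@code searcher} and a filter query {@code fq} are in scope):
* <pre>{@code
* DocSet matches = searcher.getDocSet(fq); // may be served from, and may populate, the filterCache
* int numMatching = matches.size();
* }</pre>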
*/
public DocSet getDocSet(Query query) throws IOException {
if (query instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery) query;
if (!eq.getCache()) {
if (query instanceof WrappedQuery) {
query = ((WrappedQuery) query).getWrappedQuery();
}
query = QueryUtils.makeQueryable(query);
return getDocSetNC(query, null);
}
}
// Get the absolute value (positive version) of this query. If we
// get back the same reference, we know it's positive.
Query absQ = QueryUtils.getAbs(query);
boolean positive = query == absQ;
if (filterCache != null) {
DocSet absAnswer = filterCache.get(absQ);
if (absAnswer != null) {
if (positive) return absAnswer;
else return getLiveDocSet().andNot(absAnswer);
}
}
DocSet absAnswer = getDocSetNC(absQ, null);
DocSet answer = positive ? absAnswer : getLiveDocSet().andNot(absAnswer);
if (filterCache != null) {
// cache negative queries as positive
filterCache.put(absQ, absAnswer);
}
return answer;
}
// only handle positive (non negative) queries
DocSet getPositiveDocSet(Query q) throws IOException {
DocSet answer;
if (filterCache != null) {
answer = filterCache.get(q);
if (answer != null) return answer;
}
answer = getDocSetNC(q, null);
if (filterCache != null) filterCache.put(q, answer);
return answer;
}
private static Query matchAllDocsQuery = new MatchAllDocsQuery();
private volatile BitDocSet liveDocs;
@Deprecated // TODO remove for 8.0
public BitDocSet getLiveDocs() throws IOException {
return getLiveDocSet();
}
/**
* Returns an efficient random-access {@link DocSet} of the live docs. It's cached. Never null.
* @lucene.internal the type of DocSet returned may change in the future
*/
public BitDocSet getLiveDocSet() throws IOException {
// Going through the filter cache will provide thread safety here if we only had getLiveDocs,
// but the addition of setLiveDocs means we needed to add volatile to "liveDocs".
BitDocSet docs = liveDocs;
if (docs == null) {
//note: maybe should instead calc manually by segment, using FixedBitSet.copyOf(segLiveDocs); avoid filter cache?
liveDocs = docs = getDocSetBits(matchAllDocsQuery);
}
assert docs.size() == numDocs();
return docs;
}
/**
* Returns an efficient random-access {@link Bits} of the live docs. It's cached. Null means all docs are live.
* Use this like {@link LeafReader#getLiveDocs()}.
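* <p>e.g. (a sketch): {@code Bits live = searcher.getLiveDocsBits(); boolean isLive = (live == null || live.get(docId));}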
* @lucene.internal
*/
//TODO rename to getLiveDocs in 8.0
public Bits getLiveDocsBits() throws IOException {
return getIndexReader().hasDeletions() ? getLiveDocSet().getBits() : null;
}
/** @lucene.internal */
public boolean isLiveDocsInstantiated() {
return liveDocs != null;
}
/** @lucene.internal */
public void setLiveDocs(DocSet docs) {
// a few places currently expect BitDocSet
assert docs.size() == numDocs();
this.liveDocs = makeBitDocSet(docs);
}
private static Comparator<Query> sortByCost = (q1, q2) -> ((ExtendedQuery) q1).getCost() - ((ExtendedQuery) q2).getCost();
/**
* Returns the set of document ids matching all queries. This method is cache-aware and attempts to retrieve the
* answer from the cache if possible. If the answer was not cached, it may have been inserted into the cache as a
* result of this call. This method can handle negative queries.
* A null/empty list results in {@link #getLiveDocSet()}.
*
* The DocSet returned should not be modified.
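* <p>Sketch (hypothetical filter queries {@code fq1}, {@code fq2}):
* <pre>{@code
* DocSet conjunction = searcher.getDocSet(Arrays.asList(fq1, fq2)); // fq1 AND fq2
* }</pre>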
*/
public DocSet getDocSet(List<Query> queries) throws IOException {
ProcessedFilter pf = getProcessedFilter(null, queries);
if (pf.postFilter == null) {
if (pf.answer != null) {
return pf.answer;
} else if (pf.filter == null) {
return getLiveDocSet(); // note: this is what happens when queries is an empty list
}
}
DocSetCollector setCollector = new DocSetCollector(maxDoc());
Collector collector = setCollector;
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(collector);
collector = pf.postFilter;
}
Query query = pf.filter != null ? pf.filter : matchAllDocsQuery;
search(query, collector);
if (collector instanceof DelegatingCollector) {
((DelegatingCollector) collector).finish();
}
return DocSetUtil.getDocSet(setCollector, this);
}
/**
* INTERNAL: The response object from {@link #getProcessedFilter(DocSet, List)}.
* Holds a filter and postFilter pair that together match a set of documents.
* Either of them may be null, in which case the semantics are to match everything.
* @see #getProcessedFilter(DocSet, List)
*/
public static class ProcessedFilter {
public DocSet answer; // maybe null. Sometimes we have a docSet answer that represents the complete answer / result.
public Filter filter; // maybe null
public DelegatingCollector postFilter; // maybe null
}
/**
* INTERNAL: Processes conjunction (AND) of both args into a {@link ProcessedFilter} result.
* Either arg may be null/empty, in which case it does not restrict the matching docs.
* Queries typically are resolved against the filter cache, and populate it.
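* <p>Callers follow the pattern used by {@link #getDocSet(List)}: if {@code answer} is non-null it is the
* complete result; otherwise run {@code filter} (or a match-all query when null) through a collector,
* wrapping it with {@code postFilter} when that is non-null.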
*/
public ProcessedFilter getProcessedFilter(DocSet setFilter, List<Query> queries) throws IOException {
ProcessedFilter pf = new ProcessedFilter();
if (queries == null || queries.size() == 0) {
if (setFilter != null) {
pf.answer = setFilter;
pf.filter = setFilter.getTopFilter();
}
return pf;
}
// We combine all the filter queries that come from the filter cache & setFilter into "answer".
// This might become pf.answer, but not if there are any non-cached filters.
DocSet answer = null;
boolean[] neg = new boolean[queries.size() + 1];
DocSet[] sets = new DocSet[queries.size() + 1];
List<Query> notCached = null;
List<Query> postFilters = null;
int end = 0;
int smallestIndex = -1;
if (setFilter != null) {
answer = sets[end++] = setFilter;
smallestIndex = end;
} // we are done with setFilter at this point
int smallestCount = Integer.MAX_VALUE;
for (Query q : queries) {
if (q instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery) q;
if (!eq.getCache()) {
if (eq.getCost() >= 100 && eq instanceof PostFilter) {
if (postFilters == null) postFilters = new ArrayList<>(sets.length - end);
postFilters.add(q);
} else {
if (notCached == null) notCached = new ArrayList<>(sets.length - end);
notCached.add(q);
}
continue;
}
}
if (filterCache == null) {
// there is no cache: don't pull bitsets
if (notCached == null) notCached = new ArrayList<>(sets.length - end);
WrappedQuery uncached = new WrappedQuery(q);
uncached.setCache(false);
notCached.add(uncached);
continue;
}
Query posQuery = QueryUtils.getAbs(q);
sets[end] = getPositiveDocSet(posQuery);
// Negative query if absolute value different from original
if (q == posQuery) {
neg[end] = false;
// keep track of the smallest positive set.
// This optimization is only worth it if size() is cached, which it would
// be if we don't do any set operations.
int sz = sets[end].size();
if (sz < smallestCount) {
smallestCount = sz;
smallestIndex = end;
answer = sets[end];
}
} else {
neg[end] = true;
}
end++;
}
// Are all of our normal cached filters negative?
if (end > 0 && answer == null) {
answer = getLiveDocSet();
}
// do negative queries first to shrink set size
for (int i = 0; i < end; i++) {
if (neg[i]) answer = answer.andNot(sets[i]);
}
for (int i = 0; i < end; i++) {
if (!neg[i] && i != smallestIndex) answer = answer.intersection(sets[i]);
}
// ignore "answer" if it simply matches all docs
if (answer != null && answer.size() == numDocs()) {
answer = null;
}
// answer is done.
// If no notCached nor postFilters, we can return now.
if (notCached == null && postFilters == null) {
// "answer" is the only part of the filter, so set it.
if (answer != null) {
pf.answer = answer;
pf.filter = answer.getTopFilter();
}
return pf;
}
// pf.answer will remain null ... (our local "answer" var is not the complete answer)
// Set pf.filter based on combining "answer" and "notCached"
if (notCached == null) {
if (answer != null) {
pf.filter = answer.getTopFilter();
}
} else {
Collections.sort(notCached, sortByCost);
List<Weight> weights = new ArrayList<>(notCached.size());
for (Query q : notCached) {
Query qq = QueryUtils.makeQueryable(q);
weights.add(createWeight(rewrite(qq), ScoreMode.COMPLETE_NO_SCORES, 1));
}
pf.filter = new FilterImpl(answer, weights);
}
// Set pf.postFilter
if (postFilters != null) {
Collections.sort(postFilters, sortByCost);
for (int i = postFilters.size() - 1; i >= 0; i--) {
DelegatingCollector prev = pf.postFilter;
pf.postFilter = ((PostFilter) postFilters.get(i)).getFilterCollector(this);
if (prev != null) pf.postFilter.setDelegate(prev);
}
}
return pf;
}
/** @lucene.internal */
public DocSet getDocSet(DocsEnumState deState) throws IOException {
int largestPossible = deState.termsEnum.docFreq();
boolean useCache = filterCache != null && largestPossible >= deState.minSetSizeCached;
TermQuery key = null;
if (useCache) {
key = new TermQuery(new Term(deState.fieldName, deState.termsEnum.term()));
DocSet result = filterCache.get(key);
if (result != null) return result;
}
int smallSetSize = DocSetUtil.smallSetSize(maxDoc());
int scratchSize = Math.min(smallSetSize, largestPossible);
if (deState.scratch == null || deState.scratch.length < scratchSize) deState.scratch = new int[scratchSize];
final int[] docs = deState.scratch;
int upto = 0;
int bitsSet = 0;
FixedBitSet fbs = null;
PostingsEnum postingsEnum = deState.termsEnum.postings(deState.postingsEnum, PostingsEnum.NONE);
postingsEnum = BitsFilteredPostingsEnum.wrap(postingsEnum, deState.liveDocs);
if (deState.postingsEnum == null) {
deState.postingsEnum = postingsEnum;
}
if (postingsEnum instanceof MultiPostingsEnum) {
MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
for (int subindex = 0; subindex < numSubs; subindex++) {
MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
if (sub.postingsEnum == null) continue;
int base = sub.slice.start;
int docid;
if (largestPossible > docs.length) {
if (fbs == null) fbs = new FixedBitSet(maxDoc());
while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
fbs.set(docid + base);
bitsSet++;
}
} else {
while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
docs[upto++] = docid + base;
}
}
}
} else {
int docid;
if (largestPossible > docs.length) {
fbs = new FixedBitSet(maxDoc());
while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
fbs.set(docid);
bitsSet++;
}
} else {
while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
docs[upto++] = docid;
}
}
}
DocSet result;
if (fbs != null) {
for (int i = 0; i < upto; i++) {
fbs.set(docs[i]);
}
bitsSet += upto;
result = new BitDocSet(fbs, bitsSet);
} else {
result = upto == 0 ? DocSet.EMPTY : new SortedIntDocSet(Arrays.copyOf(docs, upto));
}
if (useCache) {
filterCache.put(key, result);
}
return result;
}
// query must be positive
protected DocSet getDocSetNC(Query query, DocSet filter) throws IOException {
return DocSetUtil.createDocSet(this, query, filter);
}
/**
* Returns the set of document ids matching both the query and the filter. This method is cache-aware and attempts to
* retrieve the answer from the cache if possible. If the answer was not cached, it may have been inserted into the
* cache as a result of this call.
*
* @param filter
* may be null
* @return DocSet meeting the specified criteria, should not be modified by the caller.
*/
public DocSet getDocSet(Query query, DocSet filter) throws IOException {
if (filter == null) return getDocSet(query);
if (query instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery) query;
if (!eq.getCache()) {
if (query instanceof WrappedQuery) {
query = ((WrappedQuery) query).getWrappedQuery();
}
query = QueryUtils.makeQueryable(query);
return getDocSetNC(query, filter);
}
}
// Negative query if absolute value different from original
Query absQ = QueryUtils.getAbs(query);
boolean positive = absQ == query;
DocSet first;
if (filterCache != null) {
first = filterCache.get(absQ);
if (first == null) {
first = getDocSetNC(absQ, null);
filterCache.put(absQ, first);
}
return positive ? first.intersection(filter) : filter.andNot(first);
}
// If there isn't a cache, then do a single filtered query if positive.
return positive ? getDocSetNC(absQ, filter) : filter.andNot(getPositiveDocSet(absQ));
}
/**
 * Returns documents matching both <code>query</code> and <code>filter</code> and sorted by <code>sort</code>.
 *
 * This method is cache aware and may retrieve <code>filter</code> from the cache or make an insertion into the cache
 * as a result of this call.
 *
 * FUTURE: The returned DocList may be retrieved from a cache.
 *
 * @param filter
 * may be null
 * @param lsort
 * criteria by which to sort (if null, query relevance is used)
 * @param offset
 * offset into the list of documents to return
 * @param len
 * maximum number of documents to return
 * @return DocList meeting the specified criteria, should not be modified by the caller.
 * @throws IOException
 * If there is a low-level I/O error.
 */
public DocList getDocList(Query query, Query filter, Sort lsort, int offset, int len) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len);
QueryResult qr = new QueryResult();
search(qr, qc);
return qr.getDocList();
}
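// Example use of the method above, sketched (assumes "searcher", "q" and "fq" are in scope):
//   DocList page = searcher.getDocList(q, fq, null, 0, 10); // first 10 docs by relevance
//   int totalMatches = page.matches();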
/**
 * Returns documents matching both <code>query</code> and the intersection of the <code>filterList</code>, sorted by
 * <code>sort</code>.
 *
 * This method is cache aware and may retrieve <code>filter</code> from the cache or make an insertion into the cache
 * as a result of this call.
 *
 * FUTURE: The returned DocList may be retrieved from a cache.
 *
 * @param filterList
 * may be null
 * @param lsort
 * criteria by which to sort (if null, query relevance is used)
 * @param offset
 * offset into the list of documents to return
 * @param len
 * maximum number of documents to return
 * @return DocList meeting the specified criteria, should not be modified by the caller.
 * @throws IOException
 * If there is a low-level I/O error.
 */
public DocList getDocList(Query query, List<Query> filterList, Sort lsort, int offset, int len, int flags)
throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filterList)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setFlags(flags);
QueryResult qr = new QueryResult();
search(qr, qc);
return qr.getDocList();
}
public static final int NO_CHECK_QCACHE = 0x80000000;
public static final int GET_DOCSET = 0x40000000;
static final int NO_CHECK_FILTERCACHE = 0x20000000;
static final int NO_SET_QCACHE = 0x10000000;
static final int SEGMENT_TERMINATE_EARLY = 0x08;
public static final int TERMINATE_EARLY = 0x04;
public static final int GET_DOCLIST = 0x02; // get the documents actually returned in a response
public static final int GET_SCORES = 0x01;
/**
* getDocList version that uses+populates query and filter caches. In the event of a timeout, the cache is not
* populated.
*/
private void getDocListC(QueryResult qr, QueryCommand cmd) throws IOException {
DocListAndSet out = new DocListAndSet();
qr.setDocListAndSet(out);
QueryResultKey key = null;
int maxDocRequested = cmd.getOffset() + cmd.getLen();
// check for overflow, and check for # docs in index
if (maxDocRequested < 0 || maxDocRequested > maxDoc()) maxDocRequested = maxDoc();
int supersetMaxDoc = maxDocRequested;
DocList superset = null;
int flags = cmd.getFlags();
Query q = cmd.getQuery();
if (q instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery) q;
if (!eq.getCache()) {
flags |= (NO_CHECK_QCACHE | NO_SET_QCACHE | NO_CHECK_FILTERCACHE);
}
}
// we can try and look up the complete query in the cache.
// we can't do that if filter!=null though (we don't want to
// do hashCode() and equals() for a big DocSet).
if (queryResultCache != null && cmd.getFilter() == null
&& (flags & (NO_CHECK_QCACHE | NO_SET_QCACHE)) != ((NO_CHECK_QCACHE | NO_SET_QCACHE))) {
// all of the current flags can be reused during warming,
// so set all of them on the cache key.
key = new QueryResultKey(q, cmd.getFilterList(), cmd.getSort(), flags, cmd.getMinExactCount());
if ((flags & NO_CHECK_QCACHE) == 0) {
superset = queryResultCache.get(key);
if (superset != null) {
// check that the cache entry has scores recorded if we need them
if ((flags & GET_SCORES) == 0 || superset.hasScores()) {
// NOTE: subset() returns null if the DocList has fewer docs than
// requested
out.docList = superset.subset(cmd.getOffset(), cmd.getLen());
}
}
if (out.docList != null) {
// found the docList in the cache... now check if we need the docset too.
// OPT: possible future optimization - if the doclist contains all the matches,
// use it to make the docset instead of rerunning the query.
if (out.docSet == null && ((flags & GET_DOCSET) != 0)) {
if (cmd.getFilterList() == null) {
out.docSet = getDocSet(cmd.getQuery());
} else {
List<Query> newList = new ArrayList<>(cmd.getFilterList().size() + 1);
newList.add(cmd.getQuery());
newList.addAll(cmd.getFilterList());
out.docSet = getDocSet(newList);
}
}
return;
}
}
// If we are going to generate the result, bump up to the
// next resultWindowSize for better caching.
if ((flags & NO_SET_QCACHE) == 0) {
// handle 0 special case as well as avoid idiv in the common case.
if (maxDocRequested < queryResultWindowSize) {
supersetMaxDoc = queryResultWindowSize;
} else {
supersetMaxDoc = ((maxDocRequested - 1) / queryResultWindowSize + 1) * queryResultWindowSize;
if (supersetMaxDoc < 0) supersetMaxDoc = maxDocRequested;
}
} else {
key = null; // we won't be caching the result
}
}
cmd.setSupersetMaxDoc(supersetMaxDoc);
// OK, so now we need to generate an answer.
// One way to do that would be to check if we have an unordered list
// of results for the base query. If so, we can apply the filters and then
// sort by the resulting set. This can only be used if:
// - the sort doesn't contain score
// - we don't want score returned.
// check if we should try and use the filter cache
boolean useFilterCache = false;
if ((flags & (GET_SCORES | NO_CHECK_FILTERCACHE)) == 0 && useFilterForSortedQuery && cmd.getSort() != null
&& filterCache != null) {
useFilterCache = true;
SortField[] sfields = cmd.getSort().getSort();
for (SortField sf : sfields) {
if (sf.getType() == SortField.Type.SCORE) {
useFilterCache = false;
break;
}
}
}
if (useFilterCache) {
// now actually use the filter cache.
// for large filters that match few documents, this may be
// slower than simply re-executing the query.
if (out.docSet == null) {
out.docSet = getDocSet(cmd.getQuery(), cmd.getFilter());
List<Query> filterList = cmd.getFilterList();
if (filterList != null && !filterList.isEmpty()) {
out.docSet = out.docSet.intersection(getDocSet(cmd.getFilterList()));
}
}
// todo: there could be a sortDocSet that could take a list of
// the filters instead of anding them first...
// perhaps there should be a multi-docset-iterator
sortDocSet(qr, cmd);
} else {
// do it the normal way...
if ((flags & GET_DOCSET) != 0) {
// this currently conflates returning the docset for the base query vs
// the base query and all filters.
DocSet qDocSet = getDocListAndSetNC(qr, cmd);
// cache the docSet matching the query w/o filtering
if (qDocSet != null && filterCache != null && !qr.isPartialResults()) filterCache.put(cmd.getQuery(), qDocSet);
} else {
getDocListNC(qr, cmd);
}
assert null != out.docList : "docList is null";
}
if (null == cmd.getCursorMark()) {
// Kludge...
// we can't use DocSlice.subset, even though it should be an identity op
// because it gets confused by situations where there are lots of matches, but
// fewer docs in the slice than were requested (due to the cursor),
// so we have to short circuit the call.
// None of which is really a problem since we can't use caching with
// cursors anyway, but it still looks weird to have to special case this
// behavior based on this condition - hence the long explanation.
superset = out.docList;
out.docList = superset.subset(cmd.getOffset(), cmd.getLen());
} else {
// sanity check our cursor assumptions
assert null == superset : "cursor: superset isn't null";
assert 0 == cmd.getOffset() : "cursor: command offset mismatch";
assert 0 == out.docList.offset() : "cursor: docList offset mismatch";
assert cmd.getLen() >= supersetMaxDoc : "cursor: superset len mismatch: " + cmd.getLen() + " vs "
+ supersetMaxDoc;
}
// lastly, put the superset in the cache if the size is less than or equal
// to queryResultMaxDocsCached
if (key != null && superset.size() <= queryResultMaxDocsCached && !qr.isPartialResults()) {
queryResultCache.put(key, superset);
}
}
/**
* Helper method for extracting the {@link FieldDoc} sort values from a {@link TopFieldDocs} when available and making
* the appropriate call to {@link QueryResult#setNextCursorMark} when applicable.
*
* @param qr <code>QueryResult</code> to modify
* @param qc <code>QueryCommand</code> for context of method
* @param topDocs May or may not be a TopFieldDocs
*/
private void populateNextCursorMarkFromTopDocs(QueryResult qr, QueryCommand qc, TopDocs topDocs) {
// TODO: would be nice to rename & generalize this method for non-cursor cases...
// ...would be handy to reuse the ScoreDoc/FieldDoc sort vals directly in distrib sort
// ...but that has non-trivial queryResultCache implications
// See: SOLR-5595
if (null == qc.getCursorMark()) {
// nothing to do, short circuit out
return;
}
final CursorMark lastCursorMark = qc.getCursorMark();
// if we have a cursor, then we have a sort that at minimum involves uniqueKey..
// so we must have a TopFieldDocs containing FieldDoc[]
assert topDocs instanceof TopFieldDocs : "TopFieldDocs cursor constraint violated";
final TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs;
final ScoreDoc[] scoreDocs = topFieldDocs.scoreDocs;
if (0 == scoreDocs.length) {
// no docs on this page, re-use existing cursor mark
qr.setNextCursorMark(lastCursorMark);
} else {
ScoreDoc lastDoc = scoreDocs[scoreDocs.length - 1];
assert lastDoc instanceof FieldDoc : "FieldDoc cursor constraint violated";
List