// org.elasticsearch.search.query.QuerySearchResult — Elasticsearch subproject :server
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.search.query;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.DelayableWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.RescoreDocIds;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.internal.ShardSearchContextId;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.profile.SearchProfileDfsPhaseResult;
import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult;
import org.elasticsearch.search.rank.RankShardResult;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.common.lucene.Lucene.readTopDocs;
import static org.elasticsearch.common.lucene.Lucene.writeTopDocs;
public final class QuerySearchResult extends SearchPhaseResult {
private int from;
private int size;
private TopDocsAndMaxScore topDocsAndMaxScore;
private boolean hasScoreDocs;
private RankShardResult rankShardResult;
private TotalHits totalHits;
private float maxScore = Float.NaN;
private DocValueFormat[] sortValueFormats;
/**
* Aggregation results. We wrap them in
* {@linkplain DelayableWriteable} because
* {@link InternalAggregation} is usually made up of many small objects
* which have a fairly high overhead in the JVM. So we delay deserializing
* them until just before we need them.
*/
private DelayableWriteable aggregations;
private boolean hasAggs;
private Suggest suggest;
private boolean searchTimedOut;
private Boolean terminatedEarly = null;
private SearchProfileQueryPhaseResult profileShardResults;
private boolean hasProfileResults;
private long serviceTimeEWMA = -1;
private int nodeQueueSize = -1;
private final boolean isNull;
private final RefCounted refCounted;
private final List toRelease;
public QuerySearchResult() {
this(false);
}
public QuerySearchResult(StreamInput in) throws IOException {
this(in, false);
}
/**
* Read the object, but using a delayed aggregations field when delayedAggregations=true. Using this, the caller must ensure that
* either `consumeAggs` or `releaseAggs` is called if `hasAggs() == true`.
* @param delayedAggregations whether to use delayed aggregations or not
*/
public QuerySearchResult(StreamInput in, boolean delayedAggregations) throws IOException {
super(in);
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
isNull = in.readBoolean();
} else {
isNull = false;
}
if (isNull == false) {
ShardSearchContextId id = new ShardSearchContextId(in);
readFromWithId(id, in, delayedAggregations);
}
refCounted = null;
toRelease = null;
}
public QuerySearchResult(ShardSearchContextId contextId, SearchShardTarget shardTarget, ShardSearchRequest shardSearchRequest) {
this.contextId = contextId;
setSearchShardTarget(shardTarget);
isNull = false;
setShardSearchRequest(shardSearchRequest);
this.refCounted = AbstractRefCounted.of(this::close);
this.toRelease = new ArrayList<>();
}
private QuerySearchResult(boolean isNull) {
this.isNull = isNull;
this.refCounted = null;
toRelease = null;
}
/**
* Returns an instance that contains no response.
*/
public static QuerySearchResult nullInstance() {
return new QuerySearchResult(true);
}
/**
* Returns true if the result doesn't contain any useful information.
* It is used by the search action to avoid creating an empty response on
* shard request that rewrites to match_no_docs.
*
* TODO: Currently we need the concrete aggregators to build empty responses. This means that we cannot
* build an empty response in the coordinating node so we rely on this hack to ensure that at least one shard
* returns a valid empty response. We should move the ability to create empty responses to aggregation builders
* in order to allow building empty responses directly from the coordinating node.
*/
public boolean isNull() {
return isNull;
}
@Override
public QuerySearchResult queryResult() {
return this;
}
public void searchTimedOut(boolean searchTimedOut) {
this.searchTimedOut = searchTimedOut;
}
public boolean searchTimedOut() {
return searchTimedOut;
}
public void terminatedEarly(boolean terminatedEarly) {
this.terminatedEarly = terminatedEarly;
}
public Boolean terminatedEarly() {
return this.terminatedEarly;
}
public TopDocsAndMaxScore topDocs() {
if (topDocsAndMaxScore == null) {
throw new IllegalStateException("topDocs already consumed");
}
return topDocsAndMaxScore;
}
/**
* Returns true
iff the top docs have already been consumed.
*/
public boolean hasConsumedTopDocs() {
return topDocsAndMaxScore == null;
}
/**
* Returns and nulls out the top docs for this search results. This allows to free up memory once the top docs are consumed.
* @throws IllegalStateException if the top docs have already been consumed.
*/
public TopDocsAndMaxScore consumeTopDocs() {
TopDocsAndMaxScore topDocsAndMaxScore = this.topDocsAndMaxScore;
if (topDocsAndMaxScore == null) {
throw new IllegalStateException("topDocs already consumed");
}
this.topDocsAndMaxScore = null;
return topDocsAndMaxScore;
}
public void topDocs(TopDocsAndMaxScore topDocs, DocValueFormat[] sortValueFormats) {
setTopDocs(topDocs);
if (topDocs.topDocs.scoreDocs.length > 0 && topDocs.topDocs.scoreDocs[0] instanceof FieldDoc) {
int numFields = ((FieldDoc) topDocs.topDocs.scoreDocs[0]).fields.length;
if (numFields != sortValueFormats.length) {
throw new IllegalArgumentException(
"The number of sort fields does not match: " + numFields + " != " + sortValueFormats.length
);
}
}
this.sortValueFormats = sortValueFormats;
}
private void setTopDocs(TopDocsAndMaxScore topDocsAndMaxScore) {
this.topDocsAndMaxScore = topDocsAndMaxScore;
this.totalHits = topDocsAndMaxScore.topDocs.totalHits;
this.maxScore = topDocsAndMaxScore.maxScore;
this.hasScoreDocs = topDocsAndMaxScore.topDocs.scoreDocs.length > 0;
}
public void setRankShardResult(RankShardResult rankShardResult) {
this.rankShardResult = rankShardResult;
}
public RankShardResult getRankShardResult() {
return rankShardResult;
}
public DocValueFormat[] sortValueFormats() {
return sortValueFormats;
}
/**
* Returns true
if this query result has unconsumed aggregations
*/
public boolean hasAggs() {
return hasAggs;
}
/**
* Returns and nulls out the aggregation for this search results. This allows to free up memory once the aggregation is consumed.
* @throws IllegalStateException if the aggregations have already been consumed.
*/
public InternalAggregations consumeAggs() {
if (aggregations == null) {
throw new IllegalStateException("aggs already consumed");
}
try {
return aggregations.expand();
} finally {
aggregations.close();
aggregations = null;
}
}
public void releaseAggs() {
if (aggregations != null) {
aggregations.close();
aggregations = null;
}
}
private void close() {
Releasables.close(toRelease);
}
public void addReleasable(Releasable releasable) {
toRelease.add(releasable);
}
public void aggregations(InternalAggregations aggregations) {
assert this.aggregations == null : "aggregations already set to [" + this.aggregations + "]";
this.aggregations = aggregations == null ? null : DelayableWriteable.referencing(aggregations);
hasAggs = aggregations != null;
}
public DelayableWriteable aggregations() {
return aggregations;
}
public void setSearchProfileDfsPhaseResult(SearchProfileDfsPhaseResult searchProfileDfsPhaseResult) {
if (profileShardResults == null) {
return;
}
profileShardResults.setSearchProfileDfsPhaseResult(searchProfileDfsPhaseResult);
}
/**
* Returns and nulls out the profiled results for this search, or potentially null if result was empty.
* This allows to free up memory once the profiled result is consumed.
* @throws IllegalStateException if the profiled result has already been consumed.
*/
public SearchProfileQueryPhaseResult consumeProfileResult() {
if (profileShardResults == null) {
throw new IllegalStateException("profile results already consumed");
}
SearchProfileQueryPhaseResult result = profileShardResults;
profileShardResults = null;
return result;
}
public boolean hasProfileResults() {
return hasProfileResults;
}
public void consumeAll() {
if (hasProfileResults()) {
consumeProfileResult();
}
if (hasConsumedTopDocs() == false) {
consumeTopDocs();
}
releaseAggs();
}
/**
* Sets the finalized profiling results for this query
* @param shardResults The finalized profile
*/
public void profileResults(SearchProfileQueryPhaseResult shardResults) {
this.profileShardResults = shardResults;
hasProfileResults = shardResults != null;
}
public Suggest suggest() {
return suggest;
}
public void suggest(Suggest suggest) {
this.suggest = suggest;
}
public int from() {
return from;
}
public QuerySearchResult from(int from) {
this.from = from;
return this;
}
/**
* Returns the maximum size of this results top docs.
*/
public int size() {
return size;
}
public QuerySearchResult size(int size) {
this.size = size;
return this;
}
public long serviceTimeEWMA() {
return this.serviceTimeEWMA;
}
public QuerySearchResult serviceTimeEWMA(long serviceTimeEWMA) {
this.serviceTimeEWMA = serviceTimeEWMA;
return this;
}
public int nodeQueueSize() {
return this.nodeQueueSize;
}
public QuerySearchResult nodeQueueSize(int nodeQueueSize) {
this.nodeQueueSize = nodeQueueSize;
return this;
}
/**
* Returns true
if this result has any suggest score docs
*/
public boolean hasSuggestHits() {
return (suggest != null && suggest.hasScoreDocs());
}
public boolean hasSearchContext() {
return hasScoreDocs || hasSuggestHits() || rankShardResult != null;
}
public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOException {
readFromWithId(id, in, false);
}
private void readFromWithId(ShardSearchContextId id, StreamInput in, boolean delayedAggregations) throws IOException {
this.contextId = id;
from = in.readVInt();
size = in.readVInt();
int numSortFieldsPlus1 = in.readVInt();
if (numSortFieldsPlus1 == 0) {
sortValueFormats = null;
} else {
sortValueFormats = new DocValueFormat[numSortFieldsPlus1 - 1];
for (int i = 0; i < sortValueFormats.length; ++i) {
sortValueFormats[i] = in.readNamedWriteable(DocValueFormat.class);
}
}
setTopDocs(readTopDocs(in));
hasAggs = in.readBoolean();
boolean success = false;
try {
if (hasAggs) {
if (delayedAggregations) {
aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in);
} else {
aggregations = DelayableWriteable.referencing(InternalAggregations::readFrom, in);
}
}
if (in.readBoolean()) {
suggest = new Suggest(in);
}
searchTimedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
profileShardResults = in.readOptionalWriteable(SearchProfileQueryPhaseResult::new);
hasProfileResults = profileShardResults != null;
serviceTimeEWMA = in.readZLong();
nodeQueueSize = in.readInt();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new));
setRescoreDocIds(new RescoreDocIds(in));
}
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
rankShardResult = in.readOptionalNamedWriteable(RankShardResult.class);
}
success = true;
} finally {
if (success == false) {
// in case we were not able to deserialize the full message we must release the aggregation buffer
Releasables.close(aggregations);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
// we do not know that it is being sent over transport, but this at least protects all writes from happening, including sending.
if (aggregations != null && aggregations.isSerialized()) {
throw new IllegalStateException("cannot send serialized version since it will leak");
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
out.writeBoolean(isNull);
}
if (isNull == false) {
contextId.writeTo(out);
writeToNoId(out);
}
}
public void writeToNoId(StreamOutput out) throws IOException {
out.writeVInt(from);
out.writeVInt(size);
if (sortValueFormats == null) {
out.writeVInt(0);
} else {
out.writeVInt(1 + sortValueFormats.length);
for (int i = 0; i < sortValueFormats.length; ++i) {
out.writeNamedWriteable(sortValueFormats[i]);
}
}
writeTopDocs(out, topDocsAndMaxScore);
out.writeOptionalWriteable(aggregations);
if (suggest == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
suggest.writeTo(out);
}
out.writeBoolean(searchTimedOut);
out.writeOptionalBoolean(terminatedEarly);
out.writeOptionalWriteable(profileShardResults);
out.writeZLong(serviceTimeEWMA);
out.writeInt(nodeQueueSize);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
out.writeOptionalWriteable(getShardSearchRequest());
getRescoreDocIds().writeTo(out);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeOptionalNamedWriteable(rankShardResult);
} else if (rankShardResult != null) {
throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion() + "]");
}
}
public TotalHits getTotalHits() {
return totalHits;
}
public float getMaxScore() {
return maxScore;
}
@Override
public void incRef() {
if (refCounted != null) {
refCounted.incRef();
} else {
super.incRef();
}
}
@Override
public boolean tryIncRef() {
if (refCounted != null) {
return refCounted.tryIncRef();
}
return super.tryIncRef();
}
@Override
public boolean decRef() {
if (refCounted != null) {
return refCounted.decRef();
}
return super.decRef();
}
@Override
public boolean hasReferences() {
if (refCounted != null) {
return refCounted.hasReferences();
}
return super.hasReferences();
}
}