org.elasticsearch.search.aggregations.timeseries.InternalTimeSeries Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of elasticsearch Show documentation
Elasticsearch subproject :server
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.search.aggregations.timeseries;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.aggregations.AggregationReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation.declareMultiBucketAggregationFields;
public class InternalTimeSeries extends InternalMultiBucketAggregation
implements
TimeSeries {
// Parser used to rebuild this aggregation (as a ParsedTimeSeries) from a JSON
// response on the client side.
// NOTE(review): the generic type arguments were lost in this copy (raw
// ObjectParser); restored as ObjectParser<ParsedTimeSeries, Void> to match the
// ParsedTimeSeries::new value supplier.
private static final ObjectParser<ParsedTimeSeries, Void> PARSER = new ObjectParser<>(
    ParsedTimeSeries.class.getSimpleName(),
    true, // lenient: ignore unknown fields so newer responses still parse
    ParsedTimeSeries::new
);

static {
    // Register the standard multi-bucket response fields, with one bucket
    // factory for the unkeyed (array) shape and one for the keyed (object) shape.
    declareMultiBucketAggregationFields(
        PARSER,
        parser -> ParsedTimeSeries.ParsedBucket.fromXContent(parser, false),
        parser -> ParsedTimeSeries.ParsedBucket.fromXContent(parser, true)
    );
}
/**
 * A single time series bucket: the time-series key (an ordered map of
 * dimension name to value), its document count and the sub-aggregation
 * results for that series.
 *
 * NOTE(review): generic type arguments were lost in this copy (raw
 * {@code Map}); restored as {@code Map<String, Object>} to match
 * {@code readOrderedMap(StreamInput::readString, StreamInput::readGenericValue)}.
 */
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements TimeSeries.Bucket {
    protected long bucketOrd;
    protected final boolean keyed;
    protected final Map<String, Object> key;
    protected long docCount;
    protected InternalAggregations aggregations;

    public InternalBucket(Map<String, Object> key, long docCount, InternalAggregations aggregations, boolean keyed) {
        this.key = key;
        this.docCount = docCount;
        this.aggregations = aggregations;
        this.keyed = keyed;
    }

    /**
     * Read from a stream.
     */
    public InternalBucket(StreamInput in, boolean keyed) throws IOException {
        this.keyed = keyed;
        // Ordered map: the serialized dimension order is preserved on read.
        key = in.readOrderedMap(StreamInput::readString, StreamInput::readGenericValue);
        docCount = in.readVLong();
        aggregations = InternalAggregations.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Must mirror the StreamInput constructor: key map, doc count, sub-aggs.
        out.writeMap(key, StreamOutput::writeString, StreamOutput::writeGenericValue);
        out.writeVLong(docCount);
        aggregations.writeTo(out);
    }

    @Override
    public Map<String, Object> getKey() {
        return key;
    }

    @Override
    public String getKeyAsString() {
        return key.toString();
    }

    @Override
    public long getDocCount() {
        return docCount;
    }

    @Override
    public InternalAggregations getAggregations() {
        return aggregations;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        // Keyed buckets are object fields named after the key; unkeyed buckets
        // are anonymous array entries. Both shapes repeat the key as a field.
        if (keyed) {
            builder.startObject(getKeyAsString());
        } else {
            builder.startObject();
        }
        builder.field(CommonFields.KEY.getPreferredName(), key);
        builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
        aggregations.toXContentInternal(builder, params);
        builder.endObject();
        return builder;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        InternalTimeSeries.InternalBucket that = (InternalTimeSeries.InternalBucket) other;
        // Primitive fields compared directly instead of via Objects.equals,
        // which needlessly boxes; semantics are identical. bucketOrd is a
        // transient ordinal and intentionally excluded.
        return Objects.equals(key, that.key)
            && keyed == that.keyed
            && docCount == that.docCount
            && Objects.equals(aggregations, that.aggregations);
    }

    @Override
    public int hashCode() {
        return Objects.hash(getClass(), key, keyed, docCount, aggregations);
    }
}
// One bucket per time series (tsid). NOTE(review): generic arguments were
// lost in this copy (raw List/Map); restored to the element types actually
// stored here.
private final List<InternalBucket> buckets;
private final boolean keyed;
// bucketMap gets lazily initialized from buckets in getBucketByKey()
// (presumably keyed by the bucket's key string — confirm against getBucketByKey)
private transient Map<String, InternalBucket> bucketMap;
/**
 * Creates a new time series aggregation result.
 *
 * @param name     the aggregation name
 * @param buckets  one bucket per time series
 * @param keyed    whether buckets render as a JSON object (true) or array (false)
 * @param metadata optional caller-supplied metadata passed through to the base class
 */
public InternalTimeSeries(String name, List<InternalBucket> buckets, boolean keyed, Map<String, Object> metadata) {
    super(name, metadata);
    this.buckets = buckets;
    this.keyed = keyed;
}
/**
 * Read from a stream. Must mirror {@code doWriteTo}: keyed flag first, then a
 * vint bucket count followed by each bucket.
 */
public InternalTimeSeries(StreamInput in) throws IOException {
    super(in);
    keyed = in.readBoolean();
    int size = in.readVInt();
    List<InternalBucket> buckets = new ArrayList<>(size); // presized, count is known
    for (int i = 0; i < size; i++) {
        buckets.add(new InternalTimeSeries.InternalBucket(in, keyed));
    }
    this.buckets = buckets;
    this.bucketMap = null; // rebuilt lazily on first getBucketByKey()
}
/**
 * Returns the wire name under which this aggregation result is registered,
 * shared with the {@link TimeSeriesAggregationBuilder}.
 */
@Override
public String getWriteableName() {
    return TimeSeriesAggregationBuilder.NAME;
}
/**
 * Renders the bucket list. Keyed responses emit a JSON object whose fields
 * are the bucket key strings; unkeyed responses emit a JSON array.
 */
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
    String bucketsField = CommonFields.BUCKETS.getPreferredName();
    if (keyed) {
        builder.startObject(bucketsField);
        for (InternalBucket bucket : buckets) {
            bucket.toXContent(builder, params);
        }
        builder.endObject();
    } else {
        builder.startArray(bucketsField);
        for (InternalBucket bucket : buckets) {
            bucket.toXContent(builder, params);
        }
        builder.endArray();
    }
    return builder;
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
    // Wire format must mirror the StreamInput constructor above: the keyed
    // flag first, then the bucket collection (writeCollection emits a vint
    // size followed by each bucket's writeTo).
    out.writeBoolean(keyed);
    out.writeCollection(buckets);
}
@Override
public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) {
// We still need to reduce in case we got the same time series in 2 different indices, but we should be able to optimize
// that in the future
Map