/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index.mapper.flattened;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.ImpactsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.MinimizationOperations;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.AutomatonQueries;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.FieldDataContext;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData;
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData;
import org.elasticsearch.index.mapper.DocumentParserContext;
import org.elasticsearch.index.mapper.DynamicFieldType;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilderContext;
import org.elasticsearch.index.mapper.SourceLoader;
import org.elasticsearch.index.mapper.SourceValueFetcher;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.mapper.TextParams;
import org.elasticsearch.index.mapper.TextSearchInfo;
import org.elasticsearch.index.mapper.ValueFetcher;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.script.field.FlattenedDocValuesField;
import org.elasticsearch.script.field.ToScriptFieldFactory;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
* A field mapper that accepts a JSON object and flattens it into a single field. This data type
* can be a useful alternative to an 'object' mapping when the object has a large, unknown set
* of keys.
*
* Currently the mapper extracts all leaf values of the JSON object, converts them to their text
* representations, and indexes each one as a keyword. It creates both a 'keyed' version of the token
 * to allow searches on particular key-value pairs, as well as a 'root' token without the key.
*
* As an example, given a flattened field called 'field' and the following input
*
* {
* "field": {
* "key1": "some value",
* "key2": {
* "key3": true
* }
* }
* }
*
* the mapper will produce untokenized string fields with the name "field" and values
* "some value" and "true", as well as string fields called "field._keyed" with values
* "key1\0some value" and "key2.key3\0true". Note that \0 is used as a reserved separator
* character (see {@link FlattenedFieldParser#SEPARATOR}).
*/
public final class FlattenedFieldMapper extends FieldMapper {
// Mapping type name used in field definitions ("type": "flattened").
public static final String CONTENT_TYPE = "flattened";
// Suffix appended to the field name for the 'keyed' tokens described in the
// class Javadoc (e.g. "field._keyed" holding "key1\0some value").
public static final String KEYED_FIELD_SUFFIX = "._keyed";
// Name of the mapping parameter listing keys treated as time-series dimensions.
public static final String TIME_SERIES_DIMENSIONS_ARRAY_PARAM = "time_series_dimensions";
/**
 * Default values for the mapper's tunable parameters.
 */
private static class Defaults {
    // Default maximum nesting depth allowed for the flattened JSON object.
    public static final int DEPTH_LIMIT = 20;

    private Defaults() {
        // Constant holder; never instantiated.
    }
}
/**
 * Returns the {@link Builder} backing the given mapper, which must be a
 * {@link FlattenedFieldMapper}.
 */
private static Builder builder(Mapper in) {
    FlattenedFieldMapper flattenedMapper = (FlattenedFieldMapper) in;
    return flattenedMapper.builder;
}
public static class Builder extends FieldMapper.Builder {
final Parameter depthLimit = Parameter.intParam(
"depth_limit",
true,
m -> builder(m).depthLimit.get(),
Defaults.DEPTH_LIMIT
).addValidator(v -> {
if (v < 0) {
throw new IllegalArgumentException("[depth_limit] must be positive, got [" + v + "]");
}
});
private final Parameter indexed = Parameter.indexParam(m -> builder(m).indexed.get(), true);
private final Parameter hasDocValues = Parameter.docValuesParam(m -> builder(m).hasDocValues.get(), true);
private final Parameter nullValue = Parameter.stringParam("null_value", false, m -> builder(m).nullValue.get(), null)
.acceptsNull();
private final Parameter eagerGlobalOrdinals = Parameter.boolParam(
"eager_global_ordinals",
true,
m -> builder(m).eagerGlobalOrdinals.get(),
false
);
private final Parameter ignoreAbove = Parameter.intParam(
"ignore_above",
true,
m -> builder(m).ignoreAbove.get(),
Integer.MAX_VALUE
);
private final Parameter indexOptions = TextParams.keywordIndexOptions(m -> builder(m).indexOptions.get());
private final Parameter similarity = TextParams.similarity(m -> builder(m).similarity.get());
private final Parameter splitQueriesOnWhitespace = Parameter.boolParam(
"split_queries_on_whitespace",
true,
m -> builder(m).splitQueriesOnWhitespace.get(),
false
);
private final Parameter> dimensions = dimensionsParam(m -> builder(m).dimensions.get()).addValidator(v -> {
if (v.isEmpty() == false && (indexed.getValue() == false || hasDocValues.getValue() == false)) {
throw new IllegalArgumentException(
"Field ["
+ TIME_SERIES_DIMENSIONS_ARRAY_PARAM
+ "] requires that ["
+ indexed.name
+ "] and ["
+ hasDocValues.name
+ "] are true"
);
}
}).precludesParameters(ignoreAbove);
private final Parameter