/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package co.elastic.clients.elasticsearch;

import co.elastic.clients.ApiClient;
import co.elastic.clients.elasticsearch._types.ErrorResponse;
import co.elastic.clients.elasticsearch.async_search.ElasticsearchAsyncSearchAsyncClient;
import co.elastic.clients.elasticsearch.autoscaling.ElasticsearchAutoscalingAsyncClient;
import co.elastic.clients.elasticsearch.cat.ElasticsearchCatAsyncClient;
import co.elastic.clients.elasticsearch.ccr.ElasticsearchCcrAsyncClient;
import co.elastic.clients.elasticsearch.cluster.ElasticsearchClusterAsyncClient;
import co.elastic.clients.elasticsearch.connector.ElasticsearchConnectorAsyncClient;
import co.elastic.clients.elasticsearch.core.BulkRequest;
import co.elastic.clients.elasticsearch.core.BulkResponse;
import co.elastic.clients.elasticsearch.core.ClearScrollRequest;
import co.elastic.clients.elasticsearch.core.ClearScrollResponse;
import co.elastic.clients.elasticsearch.core.ClosePointInTimeRequest;
import co.elastic.clients.elasticsearch.core.ClosePointInTimeResponse;
import co.elastic.clients.elasticsearch.core.CountRequest;
import co.elastic.clients.elasticsearch.core.CountResponse;
import co.elastic.clients.elasticsearch.core.CreateRequest;
import co.elastic.clients.elasticsearch.core.CreateResponse;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRequest;
import co.elastic.clients.elasticsearch.core.DeleteByQueryResponse;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRethrottleRequest;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRethrottleResponse;
import co.elastic.clients.elasticsearch.core.DeleteRequest;
import co.elastic.clients.elasticsearch.core.DeleteResponse;
import co.elastic.clients.elasticsearch.core.DeleteScriptRequest;
import co.elastic.clients.elasticsearch.core.DeleteScriptResponse;
import co.elastic.clients.elasticsearch.core.ExistsRequest;
import co.elastic.clients.elasticsearch.core.ExistsSourceRequest;
import co.elastic.clients.elasticsearch.core.ExplainRequest;
import co.elastic.clients.elasticsearch.core.ExplainResponse;
import co.elastic.clients.elasticsearch.core.FieldCapsRequest;
import co.elastic.clients.elasticsearch.core.FieldCapsResponse;
import co.elastic.clients.elasticsearch.core.GetRequest;
import co.elastic.clients.elasticsearch.core.GetResponse;
import co.elastic.clients.elasticsearch.core.GetScriptContextRequest;
import co.elastic.clients.elasticsearch.core.GetScriptContextResponse;
import co.elastic.clients.elasticsearch.core.GetScriptLanguagesRequest;
import co.elastic.clients.elasticsearch.core.GetScriptLanguagesResponse;
import co.elastic.clients.elasticsearch.core.GetScriptRequest;
import co.elastic.clients.elasticsearch.core.GetScriptResponse;
import co.elastic.clients.elasticsearch.core.GetSourceRequest;
import co.elastic.clients.elasticsearch.core.GetSourceResponse;
import co.elastic.clients.elasticsearch.core.HealthReportRequest;
import co.elastic.clients.elasticsearch.core.HealthReportResponse;
import co.elastic.clients.elasticsearch.core.IndexRequest;
import co.elastic.clients.elasticsearch.core.IndexResponse;
import co.elastic.clients.elasticsearch.core.InfoRequest;
import co.elastic.clients.elasticsearch.core.InfoResponse;
import co.elastic.clients.elasticsearch.core.MgetRequest;
import co.elastic.clients.elasticsearch.core.MgetResponse;
import co.elastic.clients.elasticsearch.core.MsearchRequest;
import co.elastic.clients.elasticsearch.core.MsearchResponse;
import co.elastic.clients.elasticsearch.core.MsearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.MsearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.MtermvectorsRequest;
import co.elastic.clients.elasticsearch.core.MtermvectorsResponse;
import co.elastic.clients.elasticsearch.core.OpenPointInTimeRequest;
import co.elastic.clients.elasticsearch.core.OpenPointInTimeResponse;
import co.elastic.clients.elasticsearch.core.PingRequest;
import co.elastic.clients.elasticsearch.core.PutScriptRequest;
import co.elastic.clients.elasticsearch.core.PutScriptResponse;
import co.elastic.clients.elasticsearch.core.RankEvalRequest;
import co.elastic.clients.elasticsearch.core.RankEvalResponse;
import co.elastic.clients.elasticsearch.core.ReindexRequest;
import co.elastic.clients.elasticsearch.core.ReindexResponse;
import co.elastic.clients.elasticsearch.core.ReindexRethrottleRequest;
import co.elastic.clients.elasticsearch.core.ReindexRethrottleResponse;
import co.elastic.clients.elasticsearch.core.RenderSearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.RenderSearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.ScriptsPainlessExecuteRequest;
import co.elastic.clients.elasticsearch.core.ScriptsPainlessExecuteResponse;
import co.elastic.clients.elasticsearch.core.ScrollRequest;
import co.elastic.clients.elasticsearch.core.ScrollResponse;
import co.elastic.clients.elasticsearch.core.SearchMvtRequest;
import co.elastic.clients.elasticsearch.core.SearchRequest;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import co.elastic.clients.elasticsearch.core.SearchShardsRequest;
import co.elastic.clients.elasticsearch.core.SearchShardsResponse;
import co.elastic.clients.elasticsearch.core.SearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.SearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.TermsEnumRequest;
import co.elastic.clients.elasticsearch.core.TermsEnumResponse;
import co.elastic.clients.elasticsearch.core.TermvectorsRequest;
import co.elastic.clients.elasticsearch.core.TermvectorsResponse;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRequest;
import co.elastic.clients.elasticsearch.core.UpdateByQueryResponse;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRethrottleRequest;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRethrottleResponse;
import co.elastic.clients.elasticsearch.core.UpdateRequest;
import co.elastic.clients.elasticsearch.core.UpdateResponse;
import co.elastic.clients.elasticsearch.dangling_indices.ElasticsearchDanglingIndicesAsyncClient;
import co.elastic.clients.elasticsearch.enrich.ElasticsearchEnrichAsyncClient;
import co.elastic.clients.elasticsearch.eql.ElasticsearchEqlAsyncClient;
import co.elastic.clients.elasticsearch.esql.ElasticsearchEsqlAsyncClient;
import co.elastic.clients.elasticsearch.features.ElasticsearchFeaturesAsyncClient;
import co.elastic.clients.elasticsearch.fleet.ElasticsearchFleetAsyncClient;
import co.elastic.clients.elasticsearch.graph.ElasticsearchGraphAsyncClient;
import co.elastic.clients.elasticsearch.ilm.ElasticsearchIlmAsyncClient;
import co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesAsyncClient;
import co.elastic.clients.elasticsearch.inference.ElasticsearchInferenceAsyncClient;
import co.elastic.clients.elasticsearch.ingest.ElasticsearchIngestAsyncClient;
import co.elastic.clients.elasticsearch.license.ElasticsearchLicenseAsyncClient;
import co.elastic.clients.elasticsearch.logstash.ElasticsearchLogstashAsyncClient;
import co.elastic.clients.elasticsearch.migration.ElasticsearchMigrationAsyncClient;
import co.elastic.clients.elasticsearch.ml.ElasticsearchMlAsyncClient;
import co.elastic.clients.elasticsearch.monitoring.ElasticsearchMonitoringAsyncClient;
import co.elastic.clients.elasticsearch.nodes.ElasticsearchNodesAsyncClient;
import co.elastic.clients.elasticsearch.query_rules.ElasticsearchQueryRulesAsyncClient;
import co.elastic.clients.elasticsearch.rollup.ElasticsearchRollupAsyncClient;
import co.elastic.clients.elasticsearch.search_application.ElasticsearchSearchApplicationAsyncClient;
import co.elastic.clients.elasticsearch.searchable_snapshots.ElasticsearchSearchableSnapshotsAsyncClient;
import co.elastic.clients.elasticsearch.security.ElasticsearchSecurityAsyncClient;
import co.elastic.clients.elasticsearch.shutdown.ElasticsearchShutdownAsyncClient;
import co.elastic.clients.elasticsearch.simulate.ElasticsearchSimulateAsyncClient;
import co.elastic.clients.elasticsearch.slm.ElasticsearchSlmAsyncClient;
import co.elastic.clients.elasticsearch.snapshot.ElasticsearchSnapshotAsyncClient;
import co.elastic.clients.elasticsearch.sql.ElasticsearchSqlAsyncClient;
import co.elastic.clients.elasticsearch.ssl.ElasticsearchSslAsyncClient;
import co.elastic.clients.elasticsearch.synonyms.ElasticsearchSynonymsAsyncClient;
import co.elastic.clients.elasticsearch.tasks.ElasticsearchTasksAsyncClient;
import co.elastic.clients.elasticsearch.text_structure.ElasticsearchTextStructureAsyncClient;
import co.elastic.clients.elasticsearch.transform.ElasticsearchTransformAsyncClient;
import co.elastic.clients.elasticsearch.watcher.ElasticsearchWatcherAsyncClient;
import co.elastic.clients.elasticsearch.xpack.ElasticsearchXpackAsyncClient;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.ElasticsearchTransportConfig;
import co.elastic.clients.transport.Endpoint;
import co.elastic.clients.transport.JsonEndpoint;
import co.elastic.clients.transport.Transport;
import co.elastic.clients.transport.TransportOptions;
import co.elastic.clients.transport.endpoints.BinaryResponse;
import co.elastic.clients.transport.endpoints.BooleanResponse;
import co.elastic.clients.transport.endpoints.EndpointWithResponseMapperAttr;
import co.elastic.clients.util.ObjectBuilder;
import java.lang.reflect.Type;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.annotation.Nullable;

//----------------------------------------------------------------
//       THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------

/**
 * Client for the namespace.
 */
public class ElasticsearchAsyncClient extends ApiClient {

	/**
	 * Creates a client from a {@link ElasticsearchTransportConfig.Default}
	 * configuration created with an inline lambda expression.
	 */
	public static ElasticsearchAsyncClient of(
			Function<ElasticsearchTransportConfig.Builder, ObjectBuilder<ElasticsearchTransportConfig>> fn) {
		return new ElasticsearchAsyncClient(
				fn.apply(new ElasticsearchTransportConfig.Builder()).build().buildTransport());
	}

	/**
	 * Creates a client from an {@link ElasticsearchTransportConfig}.
	 */
	public ElasticsearchAsyncClient(ElasticsearchTransportConfig config) {
		this(config.buildTransport());
	}

	public ElasticsearchAsyncClient(ElasticsearchTransport transport) {
		super(transport, null);
	}

	public ElasticsearchAsyncClient(ElasticsearchTransport transport, @Nullable TransportOptions transportOptions) {
		super(transport, transportOptions);
	}

	@Override
	public ElasticsearchAsyncClient withTransportOptions(@Nullable TransportOptions transportOptions) {
		return new ElasticsearchAsyncClient(this.transport, transportOptions);
	}
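
	// Usage sketch (illustrative): the client can be built from an existing
	// ElasticsearchTransport, or inline through the of() factory above. The host URL and
	// API key are placeholders, and the host()/apiKey() setters are assumed to be exposed
	// by the transport configuration builder.
	//
	//   ElasticsearchAsyncClient client = ElasticsearchAsyncClient.of(c -> c
	//       .host("https://localhost:9200")
	//       .apiKey("your-api-key"));
	//
	//   // or, given an ElasticsearchTransport built elsewhere:
	//   ElasticsearchAsyncClient client2 = new ElasticsearchAsyncClient(transport);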

	// ----- Child clients

	public ElasticsearchAsyncSearchAsyncClient asyncSearch() {
		return new ElasticsearchAsyncSearchAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchAutoscalingAsyncClient autoscaling() {
		return new ElasticsearchAutoscalingAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchCatAsyncClient cat() {
		return new ElasticsearchCatAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchCcrAsyncClient ccr() {
		return new ElasticsearchCcrAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchClusterAsyncClient cluster() {
		return new ElasticsearchClusterAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchConnectorAsyncClient connector() {
		return new ElasticsearchConnectorAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchDanglingIndicesAsyncClient danglingIndices() {
		return new ElasticsearchDanglingIndicesAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchEnrichAsyncClient enrich() {
		return new ElasticsearchEnrichAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchEqlAsyncClient eql() {
		return new ElasticsearchEqlAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchEsqlAsyncClient esql() {
		return new ElasticsearchEsqlAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchFeaturesAsyncClient features() {
		return new ElasticsearchFeaturesAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchFleetAsyncClient fleet() {
		return new ElasticsearchFleetAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchGraphAsyncClient graph() {
		return new ElasticsearchGraphAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchIlmAsyncClient ilm() {
		return new ElasticsearchIlmAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchIndicesAsyncClient indices() {
		return new ElasticsearchIndicesAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchInferenceAsyncClient inference() {
		return new ElasticsearchInferenceAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchIngestAsyncClient ingest() {
		return new ElasticsearchIngestAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchLicenseAsyncClient license() {
		return new ElasticsearchLicenseAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchLogstashAsyncClient logstash() {
		return new ElasticsearchLogstashAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchMigrationAsyncClient migration() {
		return new ElasticsearchMigrationAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchMlAsyncClient ml() {
		return new ElasticsearchMlAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchMonitoringAsyncClient monitoring() {
		return new ElasticsearchMonitoringAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchNodesAsyncClient nodes() {
		return new ElasticsearchNodesAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchQueryRulesAsyncClient queryRules() {
		return new ElasticsearchQueryRulesAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchRollupAsyncClient rollup() {
		return new ElasticsearchRollupAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSearchApplicationAsyncClient searchApplication() {
		return new ElasticsearchSearchApplicationAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSearchableSnapshotsAsyncClient searchableSnapshots() {
		return new ElasticsearchSearchableSnapshotsAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSecurityAsyncClient security() {
		return new ElasticsearchSecurityAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchShutdownAsyncClient shutdown() {
		return new ElasticsearchShutdownAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSimulateAsyncClient simulate() {
		return new ElasticsearchSimulateAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSlmAsyncClient slm() {
		return new ElasticsearchSlmAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSnapshotAsyncClient snapshot() {
		return new ElasticsearchSnapshotAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSqlAsyncClient sql() {
		return new ElasticsearchSqlAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSslAsyncClient ssl() {
		return new ElasticsearchSslAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchSynonymsAsyncClient synonyms() {
		return new ElasticsearchSynonymsAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchTasksAsyncClient tasks() {
		return new ElasticsearchTasksAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchTextStructureAsyncClient textStructure() {
		return new ElasticsearchTextStructureAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchTransformAsyncClient transform() {
		return new ElasticsearchTransformAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchWatcherAsyncClient watcher() {
		return new ElasticsearchWatcherAsyncClient(this.transport, this.transportOptions);
	}

	public ElasticsearchXpackAsyncClient xpack() {
		return new ElasticsearchXpackAsyncClient(this.transport, this.transportOptions);
	}
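
	// Namespace clients share this client's transport and options. A sketch of calling one
	// of them asynchronously (the index name and lambda-built request are illustrative):
	//
	//   client.indices()
	//       .create(c -> c.index("my-index"))
	//       .whenComplete((resp, ex) -> {
	//           if (ex != null) ex.printStackTrace();
	//       });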

	// ----- Endpoint: bulk

	/**
	 * Bulk index or delete documents. Perform multiple index,
	 * create, delete, and update actions in
	 * a single request. This reduces overhead and can greatly increase indexing
	 * speed.
	 * <p>
	 * If the Elasticsearch security features are enabled, you must have the following index
	 * privileges for the target data stream, index, or index alias:
	 * <ul>
	 * <li>To use the create action, you must have the create_doc, create, index, or write
	 * index privilege. Data streams support only the create action.</li>
	 * <li>To use the index action, you must have the create, index, or write index
	 * privilege.</li>
	 * <li>To use the delete action, you must have the delete or write index privilege.</li>
	 * <li>To use the update action, you must have the index or write index privilege.</li>
	 * <li>To automatically create a data stream or index with a bulk API request, you must
	 * have the auto_configure, create_index, or manage index privilege.</li>
	 * <li>To make the result of a bulk operation visible to search using the refresh
	 * parameter, you must have the maintenance or manage index privilege.</li>
	 * </ul>
	 * <p>
	 * Automatic data stream creation requires a matching index template with data stream
	 * enabled.
	 * <p>
	 * The actions are specified in the request body using a newline delimited JSON (NDJSON)
	 * structure:
	 *
	 * <pre>
	 * action_and_meta_data\n
	 * optional_source\n
	 * action_and_meta_data\n
	 * optional_source\n
	 * ....
	 * action_and_meta_data\n
	 * optional_source\n
	 * </pre>
	 * <p>
	 * The index and create actions expect a source on the next line and have the same
	 * semantics as the op_type parameter in the standard index API. A create action fails if
	 * a document with the same ID already exists in the target. An index action adds or
	 * replaces a document as necessary.
	 * <p>
	 * NOTE: Data streams support only the create action. To update or delete a document in a
	 * data stream, you must target the backing index containing the document.
	 * <p>
	 * An update action expects that the partial doc, upsert, and script and its options are
	 * specified on the next line.
	 * <p>
	 * A delete action does not expect a source on the next line and has the same semantics
	 * as the standard delete API.
	 * <p>
	 * NOTE: The final line of data must end with a newline character (\n). Each newline
	 * character may be preceded by a carriage return (\r). When sending NDJSON data to the
	 * _bulk endpoint, use a Content-Type header of application/json or application/x-ndjson.
	 * Because this format uses literal newline characters (\n) as delimiters, make sure that
	 * the JSON actions and sources are not pretty printed.
	 * <p>
	 * If you provide a target in the request path, it is used for any actions that don't
	 * explicitly specify an _index argument.
	 * <p>
	 * A note on the format: the idea here is to make processing as fast as possible. As some
	 * of the actions are redirected to other shards on other nodes, only action_meta_data is
	 * parsed on the receiving node side.
	 * <p>
	 * Client libraries using this protocol should try and strive to do something similar on
	 * the client side, and reduce buffering as much as possible.
	 * <p>
	 * There is no "correct" number of actions to perform in a single bulk request.
	 * Experiment with different settings to find the optimal size for your particular
	 * workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb
	 * by default, so clients must ensure that no request exceeds this size. It is not
	 * possible to index a single document that exceeds the size limit, so you must
	 * pre-process any such documents into smaller pieces before sending them to
	 * Elasticsearch. For instance, split documents into pages or chapters before indexing
	 * them, or store raw binary data in a system outside Elasticsearch and replace the raw
	 * data with a link to the external system in the documents that you send to
	 * Elasticsearch.
	 * <p>
	 * Client support for bulk requests
	 * <p>
	 * Some of the officially supported clients provide helpers to assist with bulk requests
	 * and reindexing:
	 * <ul>
	 * <li>Go: Check out esutil.BulkIndexer</li>
	 * <li>Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and
	 * Search::Elasticsearch::Client::5_0::Scroll</li>
	 * <li>Python: Check out elasticsearch.helpers.*</li>
	 * <li>JavaScript: Check out client.helpers.*</li>
	 * <li>.NET: Check out BulkAllObservable</li>
	 * <li>PHP: Check out bulk indexing.</li>
	 * </ul>
	 * <p>
	 * Submitting bulk requests with cURL
	 * <p>
	 * If you're providing text file input to curl, you must use the --data-binary flag
	 * instead of plain -d. The latter doesn't preserve newlines. For example:
	 *
	 * <pre>
	 * $ cat requests
	 * { "index" : { "_index" : "test", "_id" : "1" } }
	 * { "field1" : "value1" }
	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
	 * </pre>
	 * <p>
	 * Optimistic concurrency control
	 * <p>
	 * Each index and delete action within a bulk API call may include the if_seq_no and
	 * if_primary_term parameters in their respective action and meta data lines. The
	 * if_seq_no and if_primary_term parameters control how operations are run, based on the
	 * last modification to existing documents. See Optimistic concurrency control for more
	 * details.
	 * <p>
	 * Versioning
	 * <p>
	 * Each bulk item can include the version value using the version field. It automatically
	 * follows the behavior of the index or delete operation based on the _version mapping.
	 * It also supports the version_type.
	 * <p>
	 * Routing
	 * <p>
	 * Each bulk item can include the routing value using the routing field. It automatically
	 * follows the behavior of the index or delete operation based on the _routing mapping.
	 * <p>
	 * NOTE: Data streams do not support custom routing unless they were created with the
	 * allow_custom_routing setting enabled in the template.
	 * <p>
	 * Wait for active shards
	 * <p>
	 * When making bulk calls, you can set the wait_for_active_shards parameter to require a
	 * minimum number of shard copies to be active before starting to process the bulk
	 * request.
	 * <p>
	 * Refresh
	 * <p>
	 * Control when the changes made by this request are visible to search.
	 * <p>
	 * NOTE: Only the shards that receive the bulk request will be affected by refresh.
	 * Imagine a _bulk?refresh=wait_for request with three documents in it that happen to be
	 * routed to different shards in an index with five shards. The request will only wait
	 * for those three shards to refresh. The other two shards that make up the index do not
	 * participate in the _bulk request at all.
	 * <p>
	 * You might want to disable the refresh interval temporarily to improve indexing
	 * throughput for large bulk requests. Refer to the linked documentation for step-by-step
	 * instructions using the index settings API.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<BulkResponse> bulk(BulkRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<BulkRequest, BulkResponse, ErrorResponse> endpoint = (JsonEndpoint<BulkRequest, BulkResponse, ErrorResponse>) BulkRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}
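
	// Bulk usage sketch (illustrative): two index operations sent asynchronously in one
	// request. "my-index", the ids, and the Map documents are placeholders; any
	// JSON-mappable object can serve as a document.
	//
	//   CompletableFuture<BulkResponse> future = client.bulk(b -> b
	//       .index("my-index")
	//       .operations(op -> op.index(i -> i.id("1").document(Map.of("field1", "value1"))))
	//       .operations(op -> op.index(i -> i.id("2").document(Map.of("field1", "value2")))));
	//   future.thenAccept(resp -> System.out.println("errors: " + resp.errors()));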

	/**
	 * Bulk index or delete documents. Perform multiple index,
	 * create, delete, and update actions in
	 * a single request. This reduces overhead and can greatly increase indexing
	 * speed.
	 * <p>
	 * See {@link #bulk(BulkRequest)} for the full description of the bulk API.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the
	 *            {@link BulkRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<BulkResponse> bulk(
			Function<BulkRequest.Builder, ObjectBuilder<BulkRequest>> fn) {
		return bulk(fn.apply(new BulkRequest.Builder()).build());
	}

	/**
	 * Bulk index or delete documents. Perform multiple index,
	 * create, delete, and update actions in
	 * a single request. This reduces overhead and can greatly increase indexing
	 * speed.
	 * <p>
	 * See {@link #bulk(BulkRequest)} for the full description of the bulk API.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<BulkResponse> bulk() {
		return this.transport.performRequestAsync(new BulkRequest.Builder().build(), BulkRequest._ENDPOINT,
				this.transportOptions);
	}

	// ----- Endpoint: clear_scroll

	/**
	 * Clear a scrolling search. Clear the search context and results for a
	 * scrolling search.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<ClearScrollRequest, ClearScrollResponse, ErrorResponse> endpoint = (JsonEndpoint<ClearScrollRequest, ClearScrollResponse, ErrorResponse>) ClearScrollRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Clear a scrolling search. Clear the search context and results for a
	 * scrolling search.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the
	 *            {@link ClearScrollRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<ClearScrollResponse> clearScroll(
			Function<ClearScrollRequest.Builder, ObjectBuilder<ClearScrollRequest>> fn) {
		return clearScroll(fn.apply(new ClearScrollRequest.Builder()).build());
	}

	/**
	 * Clear a scrolling search. Clear the search context and results for a
	 * scrolling search.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ClearScrollResponse> clearScroll() {
		return this.transport.performRequestAsync(new ClearScrollRequest.Builder().build(),
				ClearScrollRequest._ENDPOINT, this.transportOptions);
	}

	// ----- Endpoint: close_point_in_time

	/**
	 * Close a point in time. A point in time must be opened explicitly before
	 * being used in search requests. The keep_alive parameter tells Elasticsearch
	 * how long it should persist. A point in time is automatically closed when the
	 * keep_alive period has elapsed. However, keeping points in time has a cost;
	 * close them as soon as they are no longer required for search requests.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ClosePointInTimeResponse> closePointInTime(ClosePointInTimeRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<ClosePointInTimeRequest, ClosePointInTimeResponse, ErrorResponse> endpoint = (JsonEndpoint<ClosePointInTimeRequest, ClosePointInTimeResponse, ErrorResponse>) ClosePointInTimeRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Close a point in time. A point in time must be opened explicitly before
	 * being used in search requests. The keep_alive parameter tells Elasticsearch
	 * how long it should persist. A point in time is automatically closed when the
	 * keep_alive period has elapsed. However, keeping points in time has a cost;
	 * close them as soon as they are no longer required for search requests.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the
	 *            {@link ClosePointInTimeRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<ClosePointInTimeResponse> closePointInTime(
			Function<ClosePointInTimeRequest.Builder, ObjectBuilder<ClosePointInTimeRequest>> fn) {
		return closePointInTime(fn.apply(new ClosePointInTimeRequest.Builder()).build());
	}
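
	// Point-in-time lifecycle sketch (illustrative): open a PIT on an index, run searches
	// with its id, then close it when it is no longer needed. The index name, keep-alive
	// value, and the lambda overload of keepAlive() are assumptions for this sketch.
	//
	//   client.openPointInTime(o -> o.index("my-index").keepAlive(t -> t.time("1m")))
	//       .thenCompose(open -> /* ... run searches using open.id() ... */
	//           client.closePointInTime(c -> c.id(open.id())));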

	// ----- Endpoint: count

	/**
	 * Count search results. Get the number of documents matching a query.
	 * <p>
	 * The query can be provided either by using a simple query string as a parameter, or by
	 * defining Query DSL within the request body. The query is optional. When no query is
	 * provided, the API uses match_all to count all the documents.
	 * <p>
	 * The count API supports multi-target syntax. You can run a single count API search
	 * across multiple data streams and indices.
	 * <p>
	 * The operation is broadcast across all shards. For each shard ID group, a replica is
	 * chosen and the search is run against it. This means that replicas increase the
	 * scalability of the count.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CountResponse> count(CountRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CountRequest, CountResponse, ErrorResponse> endpoint = (JsonEndpoint<CountRequest, CountResponse, ErrorResponse>) CountRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}
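
	// Count usage sketch (illustrative): count documents matching a match query; the index
	// and field names are placeholders. With no query, the API counts all documents.
	//
	//   client.count(c -> c
	//           .index("my-index")
	//           .query(q -> q.match(m -> m.field("title").query("elasticsearch"))))
	//       .thenAccept(resp -> System.out.println("count = " + resp.count()));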

	/**
	 * Count search results. Get the number of documents matching a query. See
	 * {@link #count(CountRequest)} for the full description of the count API.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the
	 *            {@link CountRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CountResponse> count(
			Function<CountRequest.Builder, ObjectBuilder<CountRequest>> fn) {
		return count(fn.apply(new CountRequest.Builder()).build());
	}

	/**
	 * Count search results. Get the number of documents matching a query. See
	 * {@link #count(CountRequest)} for the full description of the count API.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CountResponse> count() {
		return this.transport.performRequestAsync(new CountRequest.Builder().build(), CountRequest._ENDPOINT,
				this.transportOptions);
	}

	// ----- Endpoint: create

	/**
	 * Create a new document in the index.
	 * <p>
	 * You can index a new JSON document with the /<target>/_doc/ or
	 * /<target>/_create/<_id> APIs. Using _create guarantees that the document is indexed
	 * only if it does not already exist. It returns a 409 response when a document with the
	 * same ID already exists in the index. To update an existing document, you must use the
	 * /<target>/_doc/ API.
	 * <p>
	 * If the Elasticsearch security features are enabled, you must have the following index
	 * privileges for the target data stream, index, or index alias:
	 * <ul>
	 * <li>To add a document using the PUT /<target>/_create/<_id> or
	 * POST /<target>/_create/<_id> request formats, you must have the create_doc, create,
	 * index, or write index privilege.</li>
	 * <li>To automatically create a data stream or index with this API request, you must
	 * have the auto_configure, create_index, or manage index privilege.</li>
	 * </ul>
	 * <p>
	 * Automatic data stream creation requires a matching index template with data stream
	 * enabled.
	 * <p>
	 * Automatically create data streams and indices
	 * <p>
	 * If the request's target doesn't exist and matches an index template with a data_stream
	 * definition, the index operation automatically creates the data stream.
	 * <p>
	 * If the target doesn't exist and doesn't match a data stream template, the operation
	 * automatically creates the index and applies any matching index templates.
	 * <p>
	 * NOTE: Elasticsearch includes several built-in index templates. To avoid naming
	 * collisions with these templates, refer to index pattern documentation.
	 * <p>
	 * If no mapping exists, the index operation creates a dynamic mapping. By default, new
	 * fields and objects are automatically added to the mapping if needed.
	 * <p>
	 * Automatic index creation is controlled by the action.auto_create_index setting. If it
	 * is true, any index can be created automatically. You can modify this setting to
	 * explicitly allow or block automatic creation of indices that match specified patterns,
	 * or set it to false to turn off automatic index creation entirely. Specify a
	 * comma-separated list of patterns you want to allow, or prefix each pattern with + or -
	 * to indicate whether it should be allowed or blocked. When a list is specified, the
	 * default behaviour is to disallow.
	 * <p>
	 * NOTE: The action.auto_create_index setting affects the automatic creation of indices
	 * only. It does not affect the creation of data streams.
	 * <p>
	 * Routing
	 * <p>
	 * By default, shard placement (routing) is controlled by using a hash of the document's
	 * ID value. For more explicit control, the value fed into the hash function used by the
	 * router can be directly specified on a per-operation basis using the routing parameter.
	 * <p>
	 * When setting up explicit mapping, you can also use the _routing field to direct the
	 * index operation to extract the routing value from the document itself. This does come
	 * at the (very minimal) cost of an additional document parsing pass. If the _routing
	 * mapping is defined and set to be required, the index operation will fail if no routing
	 * value is provided or extracted.
	 * <p>
	 * NOTE: Data streams do not support custom routing unless they were created with the
	 * allow_custom_routing setting enabled in the template.
	 * <p>
	 * Distributed
	 * <p>
	 * The index operation is directed to the primary shard based on its route and performed
	 * on the actual node containing this shard. After the primary shard completes the
	 * operation, if needed, the update is distributed to applicable replicas.
	 * <p>
	 * Active shards
	 * <p>
	 * To improve the resiliency of writes to the system, indexing operations can be
	 * configured to wait for a certain number of active shard copies before proceeding with
	 * the operation. If the requisite number of active shard copies are not available, then
	 * the write operation must wait and retry, until either the requisite shard copies have
	 * started or a timeout occurs. By default, write operations only wait for the primary
	 * shards to be active before proceeding (that is to say wait_for_active_shards is 1).
	 * This default can be overridden in the index settings dynamically by setting
	 * index.write.wait_for_active_shards. To alter this behavior per operation, use the
	 * wait_for_active_shards request parameter.
	 * <p>
	 * Valid values are all or any positive integer up to the total number of configured
	 * copies per shard in the index (which is number_of_replicas+1). Specifying a negative
	 * value or a number greater than the number of shard copies will throw an error.
	 * <p>
	 * For example, suppose you have a cluster of three nodes, A, B, and C, and you create an
	 * index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy
	 * than there are nodes). If you attempt an indexing operation, by default the operation
	 * will only ensure the primary copy of each shard is available before proceeding. This
	 * means that even if B and C went down and A hosted the primary shard copies, the
	 * indexing operation would still proceed with only one copy of the data. If
	 * wait_for_active_shards is set on the request to 3 (and all three nodes are up), the
	 * indexing operation will require 3 active shard copies before proceeding. This
	 * requirement should be met because there are 3 active nodes in the cluster, each one
	 * holding a copy of the shard. However, if you set wait_for_active_shards to all (or to
	 * 4, which is the same in this situation), the indexing operation will not proceed, as
	 * you do not have all 4 copies of each shard active in the index. The operation will
	 * time out unless a new node is brought up in the cluster to host the fourth copy of the
	 * shard.
	 * <p>
	 * It is important to note that this setting greatly reduces the chances of the write
	 * operation not writing to the requisite number of shard copies, but it does not
	 * completely eliminate the possibility, because this check occurs before the write
	 * operation starts. After the write operation is underway, it is still possible for
	 * replication to fail on any number of shard copies but still succeed on the primary.
	 * The _shards section of the API response reveals the number of shard copies on which
	 * replication succeeded and failed.
	 *
	 * @see Documentation on elastic.co
	 */

	public <TDocument> CompletableFuture<CreateResponse> create(CreateRequest<TDocument> request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CreateRequest<?>, CreateResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateRequest<?>, CreateResponse, ErrorResponse>) CreateRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}
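
	// Create usage sketch (illustrative): index a document only if the id does not already
	// exist; a 409 conflict completes the future exceptionally. The index name, id, and the
	// Product class are placeholders.
	//
	//   client.create(c -> c
	//           .index("products")
	//           .id("42")
	//           .document(new Product("42", "Wireless mouse", 24.99)))
	//       .thenAccept(resp -> System.out.println(resp.result()));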

	/**
	 * Create a new document in the index. Using _create guarantees that the document is
	 * indexed only if it does not already exist. See {@link #create(CreateRequest)} for the
	 * full description of the create API.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the
	 *            {@link CreateRequest}
	 * @see Documentation on elastic.co
	 */

	public final <TDocument> CompletableFuture<CreateResponse> create(
			Function<CreateRequest.Builder<TDocument>, ObjectBuilder<CreateRequest<TDocument>>> fn) {
		return create(fn.apply(new CreateRequest.Builder<TDocument>()).build());
	}

* Remove a JSON document from the specified index. *

* NOTE: You cannot send deletion requests directly to a data stream. To delete * a document in a data stream, you must target the backing index containing the * document. *

* Optimistic concurrency control *

* Delete operations can be made conditional and only be performed if the last * modification to the document was assigned the sequence number and primary * term specified by the if_seq_no and if_primary_term * parameters. If a mismatch is detected, the operation will result in a * VersionConflictException and a status code of 409. *

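* For illustration, a minimal sketch of a conditional delete with this client (the index name and the sequence number and primary term values are hypothetical; client is an existing ElasticsearchAsyncClient): * *

	 * client.delete(d -> d
	 *     .index("my-index-000001")   // hypothetical index
	 *     .id("1")
	 *     .ifSeqNo(362L)              // sequence number from a previous read
	 *     .ifPrimaryTerm(2L))         // primary term from the same read
	 *     .whenComplete((resp, ex) -> {
	 *         if (ex != null) {
	 *             // a version conflict surfaces here as a failed future (HTTP 409)
	 *         }
	 *     });
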
* Versioning *

* Each document indexed is versioned. When deleting a document, the version can * be specified to make sure the relevant document you are trying to delete is * actually being deleted and it has not changed in the meantime. Every write * operation run on a document, deletes included, causes its version to be * incremented. The version number of a deleted document remains available for a * short time after deletion to allow for control of concurrent operations. The * length of time for which a deleted document's version remains available is * determined by the index.gc_deletes index setting. *

* Routing *

* If routing is used during indexing, the routing value also needs to be * specified to delete a document. *

* If the _routing mapping is set to required and no * routing value is specified, the delete API throws a * RoutingMissingException and rejects the request. *

* For example: * *

	 * DELETE /my-index-000001/_doc/1?routing=shard-1
	 * 
	 * 
*

* This request deletes the document with ID 1, but it is routed based on the * user. The document is not deleted if the correct routing is not specified. *

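* A rough equivalent with this client, assuming the same hypothetical index and routing value: * *

	 * client.delete(d -> d
	 *     .index("my-index-000001")
	 *     .id("1")
	 *     .routing("shard-1"));   // must match the routing value used at index time
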
* Distributed *

* The delete operation gets hashed into a specific shard ID. It then gets * redirected into the primary shard within that ID group and replicated (if * needed) to shard replicas within that ID group. * * @see Documentation * on elastic.co */ public CompletableFuture delete(DeleteRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) DeleteRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Delete a document. *

* Remove a JSON document from the specified index. *

* NOTE: You cannot send deletion requests directly to a data stream. To delete * a document in a data stream, you must target the backing index containing the * document. *

* Optimistic concurrency control *

* Delete operations can be made conditional and only be performed if the last * modification to the document was assigned the sequence number and primary * term specified by the if_seq_no and if_primary_term * parameters. If a mismatch is detected, the operation will result in a * VersionConflictException and a status code of 409. *

* Versioning *

* Each document indexed is versioned. When deleting a document, the version can * be specified to make sure the relevant document you are trying to delete is * actually being deleted and it has not changed in the meantime. Every write * operation run on a document, deletes included, causes its version to be * incremented. The version number of a deleted document remains available for a * short time after deletion to allow for control of concurrent operations. The * length of time for which a deleted document's version remains available is * determined by the index.gc_deletes index setting. *

* Routing *

* If routing is used during indexing, the routing value also needs to be * specified to delete a document. *

* If the _routing mapping is set to required and no * routing value is specified, the delete API throws a * RoutingMissingException and rejects the request. *

* For example: * *

	 * DELETE /my-index-000001/_doc/1?routing=shard-1
	 * 
	 * 
*

* This request deletes the document with ID 1, but it is routed based on the * user. The document is not deleted if the correct routing is not specified. *

* Distributed *

* The delete operation gets hashed into a specific shard ID. It then gets * redirected into the primary shard within that ID group and replicated (if * needed) to shard replicas within that ID group. * * @param fn * a function that initializes a builder to create the * {@link DeleteRequest} * @see Documentation * on elastic.co */ public final CompletableFuture delete( Function> fn) { return delete(fn.apply(new DeleteRequest.Builder()).build()); } // ----- Endpoint: delete_by_query /** * Delete documents. *

* Deletes documents that match the specified query. *

* If the Elasticsearch security features are enabled, you must have the * following index privileges for the target data stream, index, or alias: *

  • read
  • delete or write

* You can specify the query criteria in the request URI or the request body * using the same syntax as the search API. When you submit a delete by query * request, Elasticsearch gets a snapshot of the data stream or index when it * begins processing the request and deletes matching documents using internal * versioning. If a document changes between the time that the snapshot is taken * and the delete operation is processed, it results in a version conflict and * the delete operation fails. *

* NOTE: Documents with a version equal to 0 cannot be deleted using delete by * query because internal versioning does not support 0 as a valid version * number. *

* While processing a delete by query request, Elasticsearch performs multiple * search requests sequentially to find all of the matching documents to delete. * A bulk delete request is performed for each batch of matching documents. If a * search or bulk request is rejected, the requests are retried up to 10 times, * with exponential backoff. If the maximum retry limit is reached, processing * halts and all failed requests are returned in the response. Any delete * requests that completed successfully still stick; they are not rolled back. *

* You can opt to count version conflicts instead of halting and returning by * setting conflicts to proceed. Note that if you opt * to count version conflicts the operation could attempt to delete more * documents from the source than max_docs until it has * successfully deleted max_docs documents, or it has gone through * every document in the source query. *

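* A minimal sketch of such a request with this client (the index, field, and value are hypothetical; Conflicts is the enum in co.elastic.clients.elasticsearch._types): * *

	 * client.deleteByQuery(d -> d
	 *     .index("my-index-000001")                                      // hypothetical index
	 *     .query(q -> q.match(m -> m.field("user.id").query("elkbee")))
	 *     .conflicts(Conflicts.Proceed))                                 // count version conflicts instead of failing
	 *     .whenComplete((resp, ex) -> {
	 *         if (resp != null) {
	 *             Long deleted = resp.deleted();                         // number of documents removed
	 *         }
	 *     });
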
* Throttling delete requests *

* To control the rate at which delete by query issues batches of delete * operations, you can set requests_per_second to any positive * decimal number. This pads each batch with a wait time to throttle the rate. * Set requests_per_second to -1 to disable * throttling. *

* Throttling uses a wait time between batches so that the internal scroll * requests can be given a timeout that takes the request padding into account. * The padding time is the difference between the batch size divided by the * requests_per_second and the time spent writing. By default the * batch size is 1000, so if requests_per_second is * set to 500: * *

	 * target_time = 1000 / 500 per second = 2 seconds
	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
	 * 
	 * 
*

* Since the batch is issued as a single _bulk request, large batch * sizes cause Elasticsearch to create many requests and wait before starting * the next set. This is "bursty" instead of "smooth". *

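* In this client the same throttle is a request parameter; a sketch with hypothetical values: * *

	 * client.deleteByQuery(d -> d
	 *     .index("my-index-000001")
	 *     .query(q -> q.matchAll(m -> m))
	 *     .requestsPerSecond(500.0F));   // pad each batch so deletes run at roughly 500 per second
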
* Slicing *

* Delete by query supports sliced scroll to parallelize the delete process. * This can improve efficiency and provide a convenient way to break the request * down into smaller parts. *

* Setting slices to auto lets Elasticsearch choose * the number of slices to use. This setting will use one slice per shard, up to * a certain limit. If there are multiple source data streams or indices, it * will choose the number of slices based on the index or backing index with the * smallest number of shards. Adding slices to the delete by query operation * creates sub-requests which means it has some quirks: *

  • You can see these requests in the tasks APIs. These sub-requests are * "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains * the status of completed slices.
  • These sub-requests are individually addressable for things like * cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the * unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each * sub-request.
  • Due to the nature of slices each sub-request won't get a * perfectly even portion of the documents. All documents will be addressed, but * some slices may be larger than others. Expect larger slices to have a more * even distribution.
  • Parameters like requests_per_second and * max_docs on a request with slices are distributed * proportionally to each sub-request. Combine that with the earlier point about * distribution being uneven and you should conclude that using * max_docs with slices might not result in exactly * max_docs documents being deleted.
  • Each sub-request gets a slightly different snapshot of the source data * stream or index though these are all taken at approximately the same * time.

* If you're slicing manually or otherwise tuning automatic slicing, keep in * mind that: *

  • Query performance is most efficient when the number of slices is equal to * the number of shards in the index or backing index. If that number is large * (for example, 500), choose a lower number as too many slices * hurts performance. Setting slices higher than the number of * shards generally does not improve efficiency and adds overhead.
  • Delete performance scales linearly across available resources with the * number of slices.

* Whether query or delete performance dominates the runtime depends on the * documents being reindexed and cluster resources. *

* Cancel a delete by query operation *

* Any delete by query can be canceled using the task cancel API. For example: * *

	 * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
	 * 
	 * 
*

* The task ID can be found by using the get tasks API. *

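* With this client the same cancellation goes through the tasks namespace client; a sketch reusing the task ID from the example above: * *

	 * client.tasks().cancel(c -> c.taskId("r1A2WoRbTwKZ516z6NEs5A:36619"));
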
* Cancellation should happen quickly but might take a few seconds. The get task * status API will continue to list the delete by query task until this task * checks that it has been cancelled and terminates itself. * * @see Documentation * on elastic.co */ public CompletableFuture deleteByQuery(DeleteByQueryRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) DeleteByQueryRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Delete documents. *

* Deletes documents that match the specified query. *

* If the Elasticsearch security features are enabled, you must have the * following index privileges for the target data stream, index, or alias: *

  • read
  • delete or write

* You can specify the query criteria in the request URI or the request body * using the same syntax as the search API. When you submit a delete by query * request, Elasticsearch gets a snapshot of the data stream or index when it * begins processing the request and deletes matching documents using internal * versioning. If a document changes between the time that the snapshot is taken * and the delete operation is processed, it results in a version conflict and * the delete operation fails. *

* NOTE: Documents with a version equal to 0 cannot be deleted using delete by * query because internal versioning does not support 0 as a valid version * number. *

* While processing a delete by query request, Elasticsearch performs multiple * search requests sequentially to find all of the matching documents to delete. * A bulk delete request is performed for each batch of matching documents. If a * search or bulk request is rejected, the requests are retried up to 10 times, * with exponential backoff. If the maximum retry limit is reached, processing * halts and all failed requests are returned in the response. Any delete * requests that completed successfully still stick; they are not rolled back. *

* You can opt to count version conflicts instead of halting and returning by * setting conflicts to proceed. Note that if you opt * to count version conflicts the operation could attempt to delete more * documents from the source than max_docs until it has * successfully deleted max_docs documents, or it has gone through * every document in the source query. *

* Throttling delete requests *

* To control the rate at which delete by query issues batches of delete * operations, you can set requests_per_second to any positive * decimal number. This pads each batch with a wait time to throttle the rate. * Set requests_per_second to -1 to disable * throttling. *

* Throttling uses a wait time between batches so that the internal scroll * requests can be given a timeout that takes the request padding into account. * The padding time is the difference between the batch size divided by the * requests_per_second and the time spent writing. By default the * batch size is 1000, so if requests_per_second is * set to 500: * *

	 * target_time = 1000 / 500 per second = 2 seconds
	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
	 * 
	 * 
*

* Since the batch is issued as a single _bulk request, large batch * sizes cause Elasticsearch to create many requests and wait before starting * the next set. This is "bursty" instead of "smooth". *

* Slicing *

* Delete by query supports sliced scroll to parallelize the delete process. * This can improve efficiency and provide a convenient way to break the request * down into smaller parts. *

* Setting slices to auto lets Elasticsearch choose * the number of slices to use. This setting will use one slice per shard, up to * a certain limit. If there are multiple source data streams or indices, it * will choose the number of slices based on the index or backing index with the * smallest number of shards. Adding slices to the delete by query operation * creates sub-requests which means it has some quirks: *

  • You can see these requests in the tasks APIs. These sub-requests are * "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains * the status of completed slices.
  • These sub-requests are individually addressable for things like * cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the * unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each * sub-request.
  • Due to the nature of slices each sub-request won't get a * perfectly even portion of the documents. All documents will be addressed, but * some slices may be larger than others. Expect larger slices to have a more * even distribution.
  • Parameters like requests_per_second and * max_docs on a request with slices are distributed * proportionally to each sub-request. Combine that with the earlier point about * distribution being uneven and you should conclude that using * max_docs with slices might not result in exactly * max_docs documents being deleted.
  • Each sub-request gets a slightly different snapshot of the source data * stream or index though these are all taken at approximately the same * time.

* If you're slicing manually or otherwise tuning automatic slicing, keep in * mind that: *

  • Query performance is most efficient when the number of slices is equal to * the number of shards in the index or backing index. If that number is large * (for example, 500), choose a lower number as too many slices * hurts performance. Setting slices higher than the number of * shards generally does not improve efficiency and adds overhead.
  • Delete performance scales linearly across available resources with the * number of slices.

* Whether query or delete performance dominates the runtime depends on the * documents being reindexed and cluster resources. *

* Cancel a delete by query operation *

* Any delete by query can be canceled using the task cancel API. For example: * *

	 * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
	 * 
	 * 
*

* The task ID can be found by using the get tasks API. *

* Cancellation should happen quickly but might take a few seconds. The get task * status API will continue to list the delete by query task until this task * checks that it has been cancelled and terminates itself. * * @param fn * a function that initializes a builder to create the * {@link DeleteByQueryRequest} * @see Documentation * on elastic.co */ public final CompletableFuture deleteByQuery( Function> fn) { return deleteByQuery(fn.apply(new DeleteByQueryRequest.Builder()).build()); } // ----- Endpoint: delete_by_query_rethrottle /** * Throttle a delete by query operation. *

* Change the number of requests per second for a particular delete by query * operation. Rethrottling that speeds up the query takes effect immediately, but * rethrottling that slows down the query takes effect after completing the * current batch to prevent scroll timeouts. * * @see Documentation * on elastic.co */ public CompletableFuture<DeleteByQueryRethrottleResponse> deleteByQueryRethrottle( DeleteByQueryRethrottleRequest request) { @SuppressWarnings("unchecked") JsonEndpoint<DeleteByQueryRethrottleRequest, DeleteByQueryRethrottleResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteByQueryRethrottleRequest, DeleteByQueryRethrottleResponse, ErrorResponse>) DeleteByQueryRethrottleRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Throttle a delete by query operation. *

* Change the number of requests per second for a particular delete by query * operation. Rethrottling that speeds up the query takes effect immediately, but * rethrottling that slows down the query takes effect after completing the * current batch to prevent scroll timeouts. * * @param fn * a function that initializes a builder to create the * {@link DeleteByQueryRethrottleRequest} * @see Documentation * on elastic.co */ public final CompletableFuture<DeleteByQueryRethrottleResponse> deleteByQueryRethrottle( Function<DeleteByQueryRethrottleRequest.Builder, ObjectBuilder<DeleteByQueryRethrottleRequest>> fn) { return deleteByQueryRethrottle(fn.apply(new DeleteByQueryRethrottleRequest.Builder()).build()); } // ----- Endpoint: delete_script /** * Delete a script or search template. Deletes a stored script or search * template. * * @see Documentation * on elastic.co */ public CompletableFuture<DeleteScriptResponse> deleteScript(DeleteScriptRequest request) { @SuppressWarnings("unchecked") JsonEndpoint<DeleteScriptRequest, DeleteScriptResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteScriptRequest, DeleteScriptResponse, ErrorResponse>) DeleteScriptRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Delete a script or search template. Deletes a stored script or search * template. * * @param fn * a function that initializes a builder to create the * {@link DeleteScriptRequest} * @see Documentation * on elastic.co */ public final CompletableFuture<DeleteScriptResponse> deleteScript( Function<DeleteScriptRequest.Builder, ObjectBuilder<DeleteScriptRequest>> fn) { return deleteScript(fn.apply(new DeleteScriptRequest.Builder()).build()); } // ----- Endpoint: exists /** * Check a document. *

* Verify that a document exists. For example, check to see if a document with * the _id 0 exists: * *

	 * HEAD my-index-000001/_doc/0
	 * 
	 * 
*

* If the document exists, the API returns a status code of * 200 - OK. If the document doesn’t exist, the API returns * 404 - Not Found. *

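* The asynchronous equivalent in this client resolves to a BooleanResponse; the index and id below are hypothetical: * *

	 * client.exists(e -> e.index("my-index-000001").id("0"))
	 *     .whenComplete((resp, ex) -> {
	 *         if (resp != null && resp.value()) {
	 *             // the document exists (HTTP 200)
	 *         }
	 *     });
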
* Versioning support *

* You can use the version parameter to check the document only if * its current version is equal to the specified one. *

* Internally, Elasticsearch has marked the old document as deleted and added an * entirely new document. The old version of the document doesn't disappear * immediately, although you won't be able to access it. Elasticsearch cleans up * deleted documents in the background as you continue to index more data. * * @see Documentation * on elastic.co */ public CompletableFuture exists(ExistsRequest request) { @SuppressWarnings("unchecked") Endpoint endpoint = (Endpoint) ExistsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Check a document. *

* Verify that a document exists. For example, check to see if a document with * the _id 0 exists: * *

	 * HEAD my-index-000001/_doc/0
	 * 
	 * 
*

* If the document exists, the API returns a status code of * 200 - OK. If the document doesn’t exist, the API returns * 404 - Not Found. *

* Versioning support *

* You can use the version parameter to check the document only if * its current version is equal to the specified one. *

* Internally, Elasticsearch has marked the old document as deleted and added an * entirely new document. The old version of the document doesn't disappear * immediately, although you won't be able to access it. Elasticsearch cleans up * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture exists( Function> fn) { return exists(fn.apply(new ExistsRequest.Builder()).build()); } // ----- Endpoint: exists_source /** * Check for a document source. *

* Check whether a document source exists in an index. For example: * *

	 * HEAD my-index-000001/_source/1
	 * 
	 * 
*

* A document's source is not available if it is disabled in the mapping. * * @see Documentation * on elastic.co */ public CompletableFuture existsSource(ExistsSourceRequest request) { @SuppressWarnings("unchecked") Endpoint endpoint = (Endpoint) ExistsSourceRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Check for a document source. *

* Check whether a document source exists in an index. For example: * *

	 * HEAD my-index-000001/_source/1
	 * 
	 * 
*

* A document's source is not available if it is disabled in the mapping. * * @param fn * a function that initializes a builder to create the * {@link ExistsSourceRequest} * @see Documentation * on elastic.co */ public final CompletableFuture existsSource( Function> fn) { return existsSource(fn.apply(new ExistsSourceRequest.Builder()).build()); } // ----- Endpoint: explain /** * Explain a document match result. Get information about why a specific * document matches, or doesn't match, a query. It computes a score explanation * for a query and a specific document. * * @see Documentation * on elastic.co */ public CompletableFuture> explain(ExplainRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ExplainRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.explain.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Explain a document match result. Get information about why a specific * document matches, or doesn't match, a query. It computes a score explanation * for a query and a specific document. * * @param fn * a function that initializes a builder to create the * {@link ExplainRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> explain( Function> fn, Class tDocumentClass) { return explain(fn.apply(new ExplainRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #explain(ExplainRequest, Class)}, where Class is defined * as Void, meaning the documents will not be deserialized. */ public CompletableFuture> explain(ExplainRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ExplainRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #explain(Function, Class)}, where Class is defined as * Void, meaning the documents will not be deserialized. */ public final CompletableFuture> explain( Function> fn) { return explain(fn.apply(new ExplainRequest.Builder()).build(), Void.class); } /** * Explain a document match result. Get information about why a specific * document matches, or doesn't match, a query. It computes a score explanation * for a query and a specific document. * * @see Documentation * on elastic.co */ public CompletableFuture> explain(ExplainRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ExplainRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.explain.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Explain a document match result. Get information about why a specific * document matches, or doesn't match, a query. It computes a score explanation * for a query and a specific document. * * @param fn * a function that initializes a builder to create the * {@link ExplainRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> explain( Function> fn, Type tDocumentType) { return explain(fn.apply(new ExplainRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: field_caps /** * Get the field capabilities. *

* Get information about the capabilities of fields among multiple indices. *

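* For example, a sketch that asks for the capabilities of a hypothetical set of fields across an index pattern: * *

	 * client.fieldCaps(f -> f
	 *     .index("my-index-*")                // hypothetical index pattern
	 *     .fields("user.id", "@timestamp"))
	 *     .whenComplete((resp, ex) -> {
	 *         if (resp != null) {
	 *             resp.fields().keySet().forEach(System.out::println);   // field names found
	 *         }
	 *     });
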
* For data streams, the API returns field capabilities among the stream’s * backing indices. It returns runtime fields like any other field. For example, * a runtime field with a type of keyword is returned the same as any other * field that belongs to the keyword family. * * @see Documentation * on elastic.co */ public CompletableFuture fieldCaps(FieldCapsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) FieldCapsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get the field capabilities. *

* Get information about the capabilities of fields among multiple indices. *

* For data streams, the API returns field capabilities among the stream’s * backing indices. It returns runtime fields like any other field. For example, * a runtime field with a type of keyword is returned the same as any other * field that belongs to the keyword family. * * @param fn * a function that initializes a builder to create the * {@link FieldCapsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture fieldCaps( Function> fn) { return fieldCaps(fn.apply(new FieldCapsRequest.Builder()).build()); } /** * Get the field capabilities. *

* Get information about the capabilities of fields among multiple indices. *

* For data streams, the API returns field capabilities among the stream’s * backing indices. It returns runtime fields like any other field. For example, * a runtime field with a type of keyword is returned the same as any other * field that belongs to the keyword family. * * @see Documentation * on elastic.co */ public CompletableFuture fieldCaps() { return this.transport.performRequestAsync(new FieldCapsRequest.Builder().build(), FieldCapsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: get /** * Get a document by its ID. *

* Get a document and its source or stored fields from an index. *

* By default, this API is realtime and is not affected by the refresh rate of * the index (when data will become visible for search). In the case where * stored fields are requested with the stored_fields parameter and * the document has been updated but is not yet refreshed, the API will have to * parse and analyze the source to extract the stored fields. To turn off * realtime behavior, set the realtime parameter to false. *

* Source filtering *

* By default, the API returns the contents of the _source field * unless you have used the stored_fields parameter or the * _source field is turned off. You can turn off * _source retrieval by using the _source parameter: * *

	 * GET my-index-000001/_doc/0?_source=false
	 * 
	 * 
*

* If you only need one or two fields from the _source, use the * _source_includes or _source_excludes parameters to * include or filter out particular fields. This can be helpful with large * documents where partial retrieval can save on network overhead. Both * parameters take a comma-separated list of fields or wildcard expressions. For * example: * *

	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
*

* If you only want to specify includes, you can use a shorter notation: * *

	 * GET my-index-000001/_doc/0?_source=*.id
	 * 
	 * 
*

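* With this client, source filtering is expressed through request parameters; the index, id, field patterns, and Product document class below are hypothetical: * *

	 * client.get(g -> g
	 *     .index("my-index-000001")
	 *     .id("0")
	 *     .sourceIncludes("*.id")        // keep only matching parts of _source
	 *     .sourceExcludes("entities"),
	 *     Product.class)                 // hypothetical mapped class
	 *     .whenComplete((resp, ex) -> {
	 *         if (resp != null && resp.found()) {
	 *             Product doc = resp.source();
	 *         }
	 *     });
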
* Routing *

* If routing is used during indexing, the routing value also needs to be * specified to retrieve a document. For example: * *

	 * GET my-index-000001/_doc/2?routing=user1
	 * 
	 * 
*

* This request gets the document with ID 2, but it is routed based on the user. * The document is not fetched if the correct routing is not specified. *

* Distributed *

* The GET operation is hashed into a specific shard ID. It is then redirected * to one of the replicas within that shard ID and returns the result. The * replicas are the primary shard and its replicas within that shard ID group. * This means that the more replicas you have, the better your GET scaling will * be. *

* Versioning support *

* You can use the version parameter to retrieve the document only * if its current version is equal to the specified one. *

* Internally, Elasticsearch has marked the old document as deleted and added an * entirely new document. The old version of the document doesn't disappear * immediately, although you won't be able to access it. Elasticsearch cleans up * deleted documents in the background as you continue to index more data. * * @see Documentation * on elastic.co */ public CompletableFuture> get(GetRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get a document by its ID. *

* Get a document and its source or stored fields from an index. *

* By default, this API is realtime and is not affected by the refresh rate of * the index (when data will become visible for search). In the case where * stored fields are requested with the stored_fields parameter and * the document has been updated but is not yet refreshed, the API will have to * parse and analyze the source to extract the stored fields. To turn off * realtime behavior, set the realtime parameter to false. *

* Source filtering *

* By default, the API returns the contents of the _source field * unless you have used the stored_fields parameter or the * _source field is turned off. You can turn off * _source retrieval by using the _source parameter: * *

	 * GET my-index-000001/_doc/0?_source=false
	 * 
	 * 
*

* If you only need one or two fields from the _source, use the * _source_includes or _source_excludes parameters to * include or filter out particular fields. This can be helpful with large * documents where partial retrieval can save on network overhead. Both * parameters take a comma-separated list of fields or wildcard expressions. For * example: * *

	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
*

* If you only want to specify includes, you can use a shorter notation: * *

	 * GET my-index-000001/_doc/0?_source=*.id
	 * 
	 * 
*

* Routing *

* If routing is used during indexing, the routing value also needs to be * specified to retrieve a document. For example: * *

	 * GET my-index-000001/_doc/2?routing=user1
	 * 
	 * 
*

* This request gets the document with ID 2, but it is routed based on the user. * The document is not fetched if the correct routing is not specified. *

* Distributed *

* The GET operation is hashed into a specific shard ID. It is then redirected * to one of the replicas within that shard ID and returns the result. The * replicas are the primary shard and its replicas within that shard ID group. * This means that the more replicas you have, the better your GET scaling will * be. *

* Versioning support *

* You can use the version parameter to retrieve the document only * if its current version is equal to the specified one. *

* Internally, Elasticsearch has marked the old document as deleted and added an * entirely new document. The old version of the document doesn't disappear * immediately, although you won't be able to access it. Elasticsearch cleans up * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link GetRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> get( Function> fn, Class tDocumentClass) { return get(fn.apply(new GetRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #get(GetRequest, Class)}, where Class is defined as Void, * meaning the documents will not be deserialized. */ public CompletableFuture> get(GetRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #get(Function, Class)}, where Class is defined as Void, * meaning the documents will not be deserialized. */ public final CompletableFuture> get(Function> fn) { return get(fn.apply(new GetRequest.Builder()).build(), Void.class); } /** * Get a document by its ID. *

* Get a document and its source or stored fields from an index. *

* By default, this API is realtime and is not affected by the refresh rate of * the index (when data will become visible for search). In the case where * stored fields are requested with the stored_fields parameter and * the document has been updated but is not yet refreshed, the API will have to * parse and analyze the source to extract the stored fields. To turn off * realtime behavior, set the realtime parameter to false. *

* Source filtering *

* By default, the API returns the contents of the _source field * unless you have used the stored_fields parameter or the * _source field is turned off. You can turn off * _source retrieval by using the _source parameter: * *

	 * GET my-index-000001/_doc/0?_source=false
	 * 
	 * 
*

* If you only need one or two fields from the _source, use the * _source_includes or _source_excludes parameters to * include or filter out particular fields. This can be helpful with large * documents where partial retrieval can save on network overhead. Both * parameters take a comma-separated list of fields or wildcard expressions. For * example: * *

	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
*

* If you only want to specify includes, you can use a shorter notation: * *

	 * GET my-index-000001/_doc/0?_source=*.id
	 * 
	 * 
*

* Routing *

* If routing is used during indexing, the routing value also needs to be * specified to retrieve a document. For example: * *

	 * GET my-index-000001/_doc/2?routing=user1
	 * 
	 * 
*

* This request gets the document with ID 2, but it is routed based on the user. * The document is not fetched if the correct routing is not specified. *

* Distributed *

* The GET operation is hashed into a specific shard ID. It is then redirected * to one of the replicas within that shard ID and returns the result. The * replicas are the primary shard and its replicas within that shard ID group. * This means that the more replicas you have, the better your GET scaling will * be. *

* Versioning support *

* You can use the version parameter to retrieve the document only * if its current version is equal to the specified one. *

* Internally, Elasticsearch has marked the old document as deleted and added an * entirely new document. The old version of the document doesn't disappear * immediately, although you won't be able to access it. Elasticsearch cleans up * deleted documents in the background as you continue to index more data. * * @see Documentation * on elastic.co */ public CompletableFuture> get(GetRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get a document by its ID. *

* Get a document and its source or stored fields from an index. *

* By default, this API is realtime and is not affected by the refresh rate of * the index (when data will become visible for search). In the case where * stored fields are requested with the stored_fields parameter and * the document has been updated but is not yet refreshed, the API will have to * parse and analyze the source to extract the stored fields. To turn off * realtime behavior, set the realtime parameter to false. *

* Source filtering *

* By default, the API returns the contents of the _source field * unless you have used the stored_fields parameter or the * _source field is turned off. You can turn off * _source retrieval by using the _source parameter: * *

	 * GET my-index-000001/_doc/0?_source=false
	 * 
	 * 
*

* If you only need one or two fields from the _source, use the * _source_includes or _source_excludes parameters to * include or filter out particular fields. This can be helpful with large * documents where partial retrieval can save on network overhead. Both * parameters take a comma-separated list of fields or wildcard expressions. For * example: * *

	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
*

* If you only want to specify includes, you can use a shorter notation: * *

	 * GET my-index-000001/_doc/0?_source=*.id
	 * 
	 * 
*

* Routing *

* If routing is used during indexing, the routing value also needs to be * specified to retrieve a document. For example: * *

	 * GET my-index-000001/_doc/2?routing=user1
	 * 
	 * 
*

* This request gets the document with ID 2, but it is routed based on the user. * The document is not fetched if the correct routing is not specified. *

* Distributed *

* The GET operation is hashed into a specific shard ID. It is then redirected * to one of the replicas within that shard ID and returns the result. The * replicas are the primary shard and its replicas within that shard ID group. * This means that the more replicas you have, the better your GET scaling will * be. *

* Versioning support *

* You can use the version parameter to retrieve the document only * if its current version is equal to the specified one. *

* Internally, Elasticsearch has marked the old document as deleted and added an * entirely new document. The old version of the document doesn't disappear * immediately, although you won't be able to access it. Elasticsearch cleans up * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link GetRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> get( Function> fn, Type tDocumentType) { return get(fn.apply(new GetRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: get_script /** * Get a script or search template. Retrieves a stored script or search * template. * * @see Documentation * on elastic.co */ public CompletableFuture getScript(GetScriptRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetScriptRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get a script or search template. Retrieves a stored script or search * template. * * @param fn * a function that initializes a builder to create the * {@link GetScriptRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getScript( Function> fn) { return getScript(fn.apply(new GetScriptRequest.Builder()).build()); } // ----- Endpoint: get_script_context /** * Get script contexts. *

* Get a list of supported script contexts and their methods. * * @see Documentation * on elastic.co */ public CompletableFuture getScriptContext() { return this.transport.performRequestAsync(GetScriptContextRequest._INSTANCE, GetScriptContextRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: get_script_languages /** * Get script languages. *

* Get a list of available script types, languages, and contexts. * * @see Documentation * on elastic.co */ public CompletableFuture getScriptLanguages() { return this.transport.performRequestAsync(GetScriptLanguagesRequest._INSTANCE, GetScriptLanguagesRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: get_source /** * Get a document's source. *

* Get the source of a document. For example: * *

	 * GET my-index-000001/_source/1
	 * 
	 * 
*

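* The equivalent call in this client deserializes the source directly; Product is again a hypothetical mapped class: * *

	 * CompletableFuture<GetSourceResponse<Product>> future =
	 *     client.getSource(g -> g.index("my-index-000001").id("1"), Product.class);
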
* You can use the source filtering parameters to control which parts of the * _source are returned: * *

	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
* * @see Documentation * on elastic.co */ public CompletableFuture> getSource(GetSourceRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetSourceRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.get_source.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get a document's source. *

* Get the source of a document. For example: * *

	 * GET my-index-000001/_source/1
	 * 
	 * 
*

* You can use the source filtering parameters to control which parts of the * _source are returned: * *

	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
* * @param fn * a function that initializes a builder to create the * {@link GetSourceRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> getSource( Function> fn, Class tDocumentClass) { return getSource(fn.apply(new GetSourceRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #getSource(GetSourceRequest, Class)}, where Class is * defined as Void, meaning the documents will not be deserialized. */ public CompletableFuture> getSource(GetSourceRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetSourceRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #getSource(Function, Class)}, where Class is defined as * Void, meaning the documents will not be deserialized. */ public final CompletableFuture> getSource( Function> fn) { return getSource(fn.apply(new GetSourceRequest.Builder()).build(), Void.class); } /** * Get a document's source. *

* Get the source of a document. For example: * *

	 * GET my-index-000001/_source/1
	 * 
	 * 
*

* You can use the source filtering parameters to control which parts of the * _source are returned: * *

	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
* * @see Documentation * on elastic.co */ public CompletableFuture> getSource(GetSourceRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetSourceRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.get_source.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get a document's source. *

* Get the source of a document. For example: * *

	 * GET my-index-000001/_source/1
	 * 
	 * 
*

* You can use the source filtering parameters to control which parts of the * _source are returned: * *

	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
	 * 
	 * 
* * @param fn * a function that initializes a builder to create the * {@link GetSourceRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> getSource( Function> fn, Type tDocumentType) { return getSource(fn.apply(new GetSourceRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: health_report /** * Get the cluster health. Get a report with the health status of an * Elasticsearch cluster. The report contains a list of indicators that compose * Elasticsearch functionality. *

* Each indicator has a health status of: green, unknown, yellow or red. The * indicator will provide an explanation and metadata describing the reason for * its current health status. *

* The cluster’s status is controlled by the worst indicator status. *

* In the event that an indicator’s status is non-green, a list of impacts may * be present in the indicator result which detail the functionalities that are * negatively affected by the health issue. Each impact carries with it a * severity level, an area of the system that is affected, and a simple * description of the impact on the system. *

* Some health indicators can determine the root cause of a health problem and * prescribe a set of steps that can be performed in order to improve the health * of the system. The root cause and remediation steps are encapsulated in a * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an * action containing a brief description of the steps to take to fix the * problem, the list of affected resources (if applicable), and a detailed * step-by-step troubleshooting guide to fix the diagnosed problem. *

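* For automated polling (see the note that follows), a sketch that disables the more expensive analysis: * *

	 * client.healthReport(h -> h.verbose(false))
	 *     .whenComplete((report, ex) -> {
	 *         if (report != null) {
	 *             System.out.println(report.status());   // overall cluster status
	 *         }
	 *     });
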
* NOTE: The health indicators perform root cause analysis of non-green health * statuses. This can be computationally expensive when called frequently. When * setting up automated polling of the API for health status, set verbose to * false to disable the more expensive analysis logic. * * @see Documentation * on elastic.co */ public CompletableFuture healthReport(HealthReportRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) HealthReportRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get the cluster health. Get a report with the health status of an * Elasticsearch cluster. The report contains a list of indicators that compose * Elasticsearch functionality. *

* Each indicator has a health status of: green, unknown, yellow or red. The * indicator will provide an explanation and metadata describing the reason for * its current health status. *

* The cluster’s status is controlled by the worst indicator status. *

* In the event that an indicator’s status is non-green, a list of impacts may * be present in the indicator result which detail the functionalities that are * negatively affected by the health issue. Each impact carries with it a * severity level, an area of the system that is affected, and a simple * description of the impact on the system. *

* Some health indicators can determine the root cause of a health problem and * prescribe a set of steps that can be performed in order to improve the health * of the system. The root cause and remediation steps are encapsulated in a * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an * action containing a brief description of the steps to take to fix the * problem, the list of affected resources (if applicable), and a detailed * step-by-step troubleshooting guide to fix the diagnosed problem. *

* NOTE: The health indicators perform root cause analysis of non-green health * statuses. This can be computationally expensive when called frequently. When * setting up automated polling of the API for health status, set verbose to * false to disable the more expensive analysis logic. * * @param fn * a function that initializes a builder to create the * {@link HealthReportRequest} * @see Documentation * on elastic.co */ public final CompletableFuture healthReport( Function> fn) { return healthReport(fn.apply(new HealthReportRequest.Builder()).build()); } /** * Get the cluster health. Get a report with the health status of an * Elasticsearch cluster. The report contains a list of indicators that compose * Elasticsearch functionality. *

* Each indicator has a health status of: green, unknown, yellow or red. The * indicator will provide an explanation and metadata describing the reason for * its current health status. *

* The cluster’s status is controlled by the worst indicator status. *

* In the event that an indicator’s status is non-green, a list of impacts may * be present in the indicator result which detail the functionalities that are * negatively affected by the health issue. Each impact carries with it a * severity level, an area of the system that is affected, and a simple * description of the impact on the system. *

* Some health indicators can determine the root cause of a health problem and * prescribe a set of steps that can be performed in order to improve the health * of the system. The root cause and remediation steps are encapsulated in a * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an * action containing a brief description of the steps to take to fix the * problem, the list of affected resources (if applicable), and a detailed * step-by-step troubleshooting guide to fix the diagnosed problem. *

* NOTE: The health indicators perform root cause analysis of non-green health * statuses. This can be computationally expensive when called frequently. When * setting up automated polling of the API for health status, set verbose to * false to disable the more expensive analysis logic. * * @see Documentation * on elastic.co */ public CompletableFuture healthReport() { return this.transport.performRequestAsync(new HealthReportRequest.Builder().build(), HealthReportRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: index /** * Create or update a document in an index. *

* Add a JSON document to the specified data stream or index and make it * searchable. If the target is an index and the document already exists, the * request updates the document and increments its version. *

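* In its simplest form with this client (the index name, id, and Product document class are hypothetical): * *

	 * Product product = new Product("bk-1", "City bike", 123.0);   // hypothetical document class
	 * client.index(i -> i
	 *     .index("my-index-000001")
	 *     .id("1")
	 *     .document(product))
	 *     .whenComplete((resp, ex) -> {
	 *         if (resp != null) {
	 *             System.out.println(resp.result());   // Created or Updated
	 *         }
	 *     });
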
* NOTE: You cannot use this API to send update requests for existing documents * in a data stream. *

* If the Elasticsearch security features are enabled, you must have the * following index privileges for the target data stream, index, or index alias: *

  • To add or overwrite a document using the * PUT /<target>/_doc/<_id> request format, you must * have the create, index, or write index * privilege.
  • To add a document using the POST /<target>/_doc/ * request format, you must have the create_doc, * create, index, or write index * privilege.
  • To automatically create a data stream or index with this API request, you * must have the auto_configure, create_index, or * manage index privilege.

* Automatic data stream creation requires a matching index template with data * stream enabled. *

* NOTE: Replica shards might not all be started when an indexing operation * returns successfully. By default, only the primary is required. Set * wait_for_active_shards to change this default behavior. *

* Automatically create data streams and indices *

* If the request's target doesn't exist and matches an index template with a * data_stream definition, the index operation automatically * creates the data stream. *

* If the target doesn't exist and doesn't match a data stream template, the * operation automatically creates the index and applies any matching index * templates. *

* NOTE: Elasticsearch includes several built-in index templates. To avoid * naming collisions with these templates, refer to index pattern documentation. *

* If no mapping exists, the index operation creates a dynamic mapping. By * default, new fields and objects are automatically added to the mapping if * needed. *

* Automatic index creation is controlled by the * action.auto_create_index setting. If it is true, * any index can be created automatically. You can modify this setting to * explicitly allow or block automatic creation of indices that match specified * patterns or set it to false to turn off automatic index creation * entirely. Specify a comma-separated list of patterns you want to allow or * prefix each pattern with + or - to indicate whether * it should be allowed or blocked. When a list is specified, the default * behaviour is to disallow. *

* NOTE: The action.auto_create_index setting affects the automatic * creation of indices only. It does not affect the creation of data streams. *

* Optimistic concurrency control *

* Index operations can be made conditional and only be performed if the last * modification to the document was assigned the sequence number and primary * term specified by the if_seq_no and if_primary_term * parameters. If a mismatch is detected, the operation will result in a * VersionConflictException and a status code of 409. *

* Routing *

* By default, shard placement — or routing — is controlled by using a hash of * the document's ID value. For more explicit control, the value fed into the * hash function used by the router can be directly specified on a per-operation * basis using the routing parameter. *

* When setting up explicit mapping, you can also use the _routing * field to direct the index operation to extract the routing value from the * document itself. This does come at the (very minimal) cost of an additional * document parsing pass. If the _routing mapping is defined and * set to be required, the index operation will fail if no routing value is * provided or extracted. *

* NOTE: Data streams do not support custom routing unless they were created * with the allow_custom_routing setting enabled in the template. *

* Distributed *

* The index operation is directed to the primary shard based on its route and * performed on the actual node containing this shard. After the primary shard * completes the operation, if needed, the update is distributed to applicable * replicas. *

* Active shards *

* To improve the resiliency of writes to the system, indexing operations can be * configured to wait for a certain number of active shard copies before * proceeding with the operation. If the requisite number of active shard copies * are not available, then the write operation must wait and retry, until either * the requisite shard copies have started or a timeout occurs. By default, * write operations only wait for the primary shards to be active before * proceeding (that is to say wait_for_active_shards is * 1). This default can be overridden in the index settings * dynamically by setting index.write.wait_for_active_shards. To * alter this behavior per operation, use the * wait_for_active_shards request parameter. *

* Valid values are all or any positive integer up to the total number of * configured copies per shard in the index (which is * number_of_replicas+1). Specifying a negative value or a number * greater than the number of shard copies will throw an error. *

* For example, suppose you have a cluster of three nodes, A, B, and C, and you * create an index named index with the number of replicas set to 3 (resulting in 4 * shard copies, one more copy than there are nodes). If you attempt an indexing * operation, by default the operation will only ensure the primary copy of each * shard is available before proceeding. This means that even if B and C went * down and A hosted the primary shard copies, the indexing operation would * still proceed with only one copy of the data. If * wait_for_active_shards is set on the request to 3 * (and all three nodes are up), the indexing operation will require 3 active * shard copies before proceeding. This requirement should be met because there * are 3 active nodes in the cluster, each one holding a copy of the shard. * However, if you set wait_for_active_shards to all * (or to 4, which is the same in this situation), the indexing * operation will not proceed as you do not have all 4 copies of each shard * active in the index. The operation will time out unless a new node is brought * up in the cluster to host the fourth copy of the shard. *

* It is important to note that this setting greatly reduces the chances of the * write operation not writing to the requisite number of shard copies, but it * does not completely eliminate the possibility, because this check occurs * before the write operation starts. After the write operation is underway, it * is still possible for replication to fail on any number of shard copies but * still succeed on the primary. The _shards section of the API * response reveals the number of shard copies on which replication succeeded * and failed. *

* No operation (noop) updates *

* When updating a document by using this API, a new version of the document is * always created even if the document hasn't changed. If this isn't acceptable * use the _update API with detect_noop set to * true. The detect_noop option isn't available on * this API because it doesn’t fetch the old source and isn't able to compare it * against the new source. *

* There isn't a definitive rule for when noop updates aren't acceptable. It's a * combination of lots of factors like how frequently your data source sends * updates that are actually noops and how many queries per second Elasticsearch * runs on the shard receiving the updates. *

* Versioning *

* Each indexed document is given a version number. By default, internal * versioning is used that starts at 1 and increments with each update, deletes * included. Optionally, the version number can be set to an external value (for * example, if maintained in a database). To enable this functionality, * version_type should be set to external. The value * provided must be a numeric, long value greater than or equal to 0, and less * than around 9.2e+18. *

* NOTE: Versioning is completely real time, and is not affected by the near * real time aspects of search operations. If no version is provided, the * operation runs without any version checks. *

* When using the external version type, the system checks to see if the version * number passed to the index request is greater than the version of the * currently stored document. If true, the document will be indexed and the new * version number used. If the value provided is less than or equal to the * stored document's version number, a version conflict will occur and the index * operation will fail. For example: * *

	 * PUT my-index-000001/_doc/1?version=2&version_type=external
	 * {
	 *   "user": {
	 *     "id": "elkbee"
	 *   }
	 * }
	 *
	 * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
	 * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
	 *
	 * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
	 * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
	 * 
	 * 
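As a rough Java-client equivalent of the PUT request above (the index name, document ID, and document body come from the example; the surrounding client instance is assumed):

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch._types.VersionType;
import co.elastic.clients.elasticsearch.core.IndexResponse;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

class ExternalVersioningExample {
    // Mirrors PUT my-index-000001/_doc/1?version=2&version_type=external
    static CompletableFuture<IndexResponse> indexWithExternalVersion(ElasticsearchAsyncClient client) {
        return client.index(i -> i
                .index("my-index-000001")
                .id("1")
                .version(2L)                       // version supplied by the external system
                .versionType(VersionType.External) // compared against the stored document's version
                .document(Map.of("user", Map.of("id", "elkbee"))));
    }
}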
 * @see Documentation on elastic.co
 */
public CompletableFuture<IndexResponse> index(IndexRequest request) {
	@SuppressWarnings("unchecked")
	JsonEndpoint<IndexRequest, IndexResponse, ErrorResponse> endpoint = (JsonEndpoint<IndexRequest, IndexResponse, ErrorResponse>) IndexRequest._ENDPOINT;
	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}

/**
 * Create or update a document in an index.
 *

* Add a JSON document to the specified data stream or index and make it * searchable. If the target is an index and the document already exists, the * request updates the document and increments its version. *

* NOTE: You cannot use this API to send update requests for existing documents * in a data stream. *

* If the Elasticsearch security features are enabled, you must have the * following index privileges for the target data stream, index, or index alias: *

  • To add or overwrite a document using the PUT /<target>/_doc/<_id> request format, you must have the create, index, or write index privilege.
  • To add a document using the POST /<target>/_doc/ request format, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.

* Automatic data stream creation requires a matching index template with data * stream enabled. *

* NOTE: Replica shards might not all be started when an indexing operation * returns successfully. By default, only the primary is required. Set * wait_for_active_shards to change this default behavior. *

* Automatically create data streams and indices *

* If the request's target doesn't exist and matches an index template with a * data_stream definition, the index operation automatically * creates the data stream. *

* If the target doesn't exist and doesn't match a data stream template, the * operation automatically creates the index and applies any matching index * templates. *

* NOTE: Elasticsearch includes several built-in index templates. To avoid * naming collisions with these templates, refer to index pattern documentation. *

* If no mapping exists, the index operation creates a dynamic mapping. By * default, new fields and objects are automatically added to the mapping if * needed. *

* Automatic index creation is controlled by the * action.auto_create_index setting. If it is true, * any index can be created automatically. You can modify this setting to * explicitly allow or block automatic creation of indices that match specified * patterns or set it to false to turn off automatic index creation * entirely. Specify a comma-separated list of patterns you want to allow or * prefix each pattern with + or - to indicate whether * it should be allowed or blocked. When a list is specified, the default * behaviour is to disallow. *

* NOTE: The action.auto_create_index setting affects the automatic * creation of indices only. It does not affect the creation of data streams. *
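As a hedged sketch of changing that setting from this client (the pattern value is purely illustrative and the cluster-settings call is an assumption about the generated cluster namespace, not something defined in this file):

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.cluster.PutClusterSettingsResponse;
import co.elastic.clients.json.JsonData;
import java.util.concurrent.CompletableFuture;

class AutoCreateIndexExample {
    // Allow automatic creation only for indices matching my-index-*, block everything else.
    static CompletableFuture<PutClusterSettingsResponse> restrictAutoCreate(ElasticsearchAsyncClient client) {
        return client.cluster().putSettings(s -> s
                .persistent("action.auto_create_index", JsonData.of("+my-index-*,-*")));
    }
}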

* Optimistic concurrency control *

* Index operations can be made conditional and only be performed if the last * modification to the document was assigned the sequence number and primary * term specified by the if_seq_no and if_primary_term * parameters. If a mismatch is detected, the operation will result in a * VersionConflictException and a status code of 409. *
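For example, a conditional write might look like the following sketch; the sequence number and primary term shown are placeholders you would normally take from a previous get or index response, and the index name is hypothetical:

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.IndexResponse;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

class OptimisticConcurrencyExample {
    // Only apply the write if the stored document still carries the seq_no/primary_term we last observed;
    // otherwise the request fails with a 409 version conflict.
    static CompletableFuture<IndexResponse> conditionalIndex(ElasticsearchAsyncClient client) {
        return client.index(i -> i
                .index("my-index-000001")
                .id("1")
                .ifSeqNo(362L)
                .ifPrimaryTerm(2L)
                .document(Map.of("user", Map.of("id", "elkbee"))));
    }
}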

* Routing *

* By default, shard placement — or routing — is controlled by using a hash of * the document's ID value. For more explicit control, the value fed into the * hash function used by the router can be directly specified on a per-operation * basis using the routing parameter. *
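A minimal sketch of setting that per-operation routing value with this client (the routing value "user1" and the index name are illustrative assumptions):

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.IndexResponse;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

class RoutingExample {
    // Route this document by a custom value instead of a hash of the document ID.
    static CompletableFuture<IndexResponse> indexWithRouting(ElasticsearchAsyncClient client) {
        return client.index(i -> i
                .index("my-index-000001")
                .routing("user1")
                .document(Map.of("user", Map.of("id", "user1"))));
    }
}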

* When setting up explicit mapping, you can also use the _routing * field to direct the index operation to extract the routing value from the * document itself. This does come at the (very minimal) cost of an additional * document parsing pass. If the _routing mapping is defined and * set to be required, the index operation will fail if no routing value is * provided or extracted. *

* NOTE: Data streams do not support custom routing unless they were created * with the allow_custom_routing setting enabled in the template. *

* Distributed *

* The index operation is directed to the primary shard based on its route and * performed on the actual node containing this shard. After the primary shard * completes the operation, if needed, the update is distributed to applicable * replicas. *

* Active shards *

* To improve the resiliency of writes to the system, indexing operations can be * configured to wait for a certain number of active shard copies before * proceeding with the operation. If the requisite number of active shard copies * are not available, then the write operation must wait and retry, until either * the requisite shard copies have started or a timeout occurs. By default, * write operations only wait for the primary shards to be active before * proceeding (that is to say wait_for_active_shards is * 1). This default can be overridden in the index settings * dynamically by setting index.write.wait_for_active_shards. To * alter this behavior per operation, use the * wait_for_active_shards request parameter. *

* Valid values are all or any positive integer up to the total number of * configured copies per shard in the index (which is * number_of_replicas+1). Specifying a negative value or a number * greater than the number of shard copies will throw an error. *

* For example, suppose you have a cluster of three nodes, A, B, and C and you * create an index index with the number of replicas set to 3 (resulting in 4 * shard copies, one more copy than there are nodes). If you attempt an indexing * operation, by default the operation will only ensure the primary copy of each * shard is available before proceeding. This means that even if B and C went * down and A hosted the primary shard copies, the indexing operation would * still proceed with only one copy of the data. If * wait_for_active_shards is set on the request to 3 * (and all three nodes are up), the indexing operation will require 3 active * shard copies before proceeding. This requirement should be met because there * are 3 active nodes in the cluster, each one holding a copy of the shard. * However, if you set wait_for_active_shards to all * (or to 4, which is the same in this situation), the indexing * operation will not proceed as you do not have all 4 copies of each shard * active in the index. The operation will timeout unless a new node is brought * up in the cluster to host the fourth copy of the shard. *

* It is important to note that this setting greatly reduces the chances of the * write operation not writing to the requisite number of shard copies, but it * does not completely eliminate the possibility, because this check occurs * before the write operation starts. After the write operation is underway, it * is still possible for replication to fail on any number of shard copies but * still succeed on the primary. The _shards section of the API * response reveals the number of shard copies on which replication succeeded * and failed. *

* No operation (noop) updates *

* When updating a document by using this API, a new version of the document is * always created even if the document hasn't changed. If this isn't acceptable * use the _update API with detect_noop set to * true. The detect_noop option isn't available on * this API because it doesn’t fetch the old source and isn't able to compare it * against the new source. *

* There isn't a definitive rule for when noop updates aren't acceptable. It's a * combination of lots of factors like how frequently your data source sends * updates that are actually noops and how many queries per second Elasticsearch * runs on the shard receiving the updates. *

* Versioning *

* Each indexed document is given a version number. By default, internal * versioning is used that starts at 1 and increments with each update, deletes * included. Optionally, the version number can be set to an external value (for * example, if maintained in a database). To enable this functionality, * version_type should be set to external. The value * provided must be a numeric, long value greater than or equal to 0, and less * than around 9.2e+18. *

* NOTE: Versioning is completely real time, and is not affected by the near * real time aspects of search operations. If no version is provided, the * operation runs without any version checks. *

* When using the external version type, the system checks to see if the version * number passed to the index request is greater than the version of the * currently stored document. If true, the document will be indexed and the new * version number used. If the value provided is less than or equal to the * stored document's version number, a version conflict will occur and the index * operation will fail. For example: * *

	 * PUT my-index-000001/_doc/1?version=2&version_type=external
	 * {
	 *   "user": {
	 *     "id": "elkbee"
	 *   }
	 * }
	 *
	 * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
	 * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
	 *
	 * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
	 * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
	 * 
	 * 
* * @param fn * a function that initializes a builder to create the * {@link IndexRequest} * @see Documentation * on elastic.co */ public final CompletableFuture index( Function, ObjectBuilder>> fn) { return index(fn.apply(new IndexRequest.Builder()).build()); } // ----- Endpoint: info /** * Get cluster info. Get basic build, version, and cluster information. * * @see Documentation * on elastic.co */ public CompletableFuture info() { return this.transport.performRequestAsync(InfoRequest._INSTANCE, InfoRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: mget /** * Get multiple documents. *

* Get multiple JSON documents by ID from one or more indices. If you specify an * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. *
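A minimal sketch of such a request with this client; the index name, document IDs, and the MyDoc target class are assumptions used only to show how the document class is passed for deserialization:

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.MgetResponse;
import java.util.concurrent.CompletableFuture;

class MgetExample {
    // Hypothetical application type the JSON mapper deserializes each _source into.
    public static class MyDoc {
        public String title;
    }

    // The index is given once in the request, so the body only needs the document IDs.
    static CompletableFuture<MgetResponse<MyDoc>> getTwoDocs(ElasticsearchAsyncClient client) {
        return client.mget(m -> m
                .index("my-index-000001")
                .ids("1", "2"),
            MyDoc.class);
    }
}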

* Filter source fields *

* By default, the _source field is returned for every document (if * stored). Use the _source and _source_include or * source_exclude attributes to filter what fields are returned for * a particular document. You can include the _source, * _source_includes, and _source_excludes query * parameters in the request URI to specify the defaults to use when there are * no per-document instructions. *

* Get stored fields *

* Use the stored_fields attribute to specify the set of stored * fields you want to retrieve. Any requested fields that are not stored are * ignored. You can include the stored_fields query parameter in * the request URI to specify the defaults to use when there are no per-document * instructions. * * @see Documentation * on elastic.co */ public CompletableFuture> mget(MgetRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MgetRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.mget.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get multiple documents. *

* Get multiple JSON documents by ID from one or more indices. If you specify an * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. *

* Filter source fields *

* By default, the _source field is returned for every document (if * stored). Use the _source and _source_include or * source_exclude attributes to filter what fields are returned for * a particular document. You can include the _source, * _source_includes, and _source_excludes query * parameters in the request URI to specify the defaults to use when there are * no per-document instructions. *

* Get stored fields *

* Use the stored_fields attribute to specify the set of stored * fields you want to retrieve. Any requested fields that are not stored are * ignored. You can include the stored_fields query parameter in * the request URI to specify the defaults to use when there are no per-document * instructions. * * @param fn * a function that initializes a builder to create the * {@link MgetRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> mget( Function> fn, Class tDocumentClass) { return mget(fn.apply(new MgetRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #mget(MgetRequest, Class)}, where Class is defined as * Void, meaning the documents will not be deserialized. */ public CompletableFuture> mget(MgetRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MgetRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #mget(Function, Class)}, where Class is defined as Void, * meaning the documents will not be deserialized. */ public final CompletableFuture> mget( Function> fn) { return mget(fn.apply(new MgetRequest.Builder()).build(), Void.class); } /** * Get multiple documents. *

* Get multiple JSON documents by ID from one or more indices. If you specify an * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. *

* Filter source fields *

* By default, the _source field is returned for every document (if * stored). Use the _source and _source_include or * source_exclude attributes to filter what fields are returned for * a particular document. You can include the _source, * _source_includes, and _source_excludes query * parameters in the request URI to specify the defaults to use when there are * no per-document instructions. *

* Get stored fields *

* Use the stored_fields attribute to specify the set of stored * fields you want to retrieve. Any requested fields that are not stored are * ignored. You can include the stored_fields query parameter in * the request URI to specify the defaults to use when there are no per-document * instructions. * * @see Documentation * on elastic.co */ public CompletableFuture> mget(MgetRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MgetRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.mget.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get multiple documents. *

* Get multiple JSON documents by ID from one or more indices. If you specify an * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. *

* Filter source fields *

* By default, the _source field is returned for every document (if * stored). Use the _source and _source_include or * source_exclude attributes to filter what fields are returned for * a particular document. You can include the _source, * _source_includes, and _source_excludes query * parameters in the request URI to specify the defaults to use when there are * no per-document instructions. *

* Get stored fields *

* Use the stored_fields attribute to specify the set of stored * fields you want to retrieve. Any requested fields that are not stored are * ignored. You can include the stored_fields query parameter in * the request URI to specify the defaults to use when there are no per-document * instructions. * * @param fn * a function that initializes a builder to create the * {@link MgetRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> mget( Function> fn, Type tDocumentType) { return mget(fn.apply(new MgetRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: msearch /** * Run multiple searches. *

* The format of the request is similar to the bulk API format and makes use of * the newline delimited JSON (NDJSON) format. The structure is as follows: * *

	 * header\n
	 * body\n
	 * header\n
	 * body\n
	 * 
	 * 
*

* This structure is specifically optimized to reduce parsing if a specific * search ends up redirected to another node. *
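When going through this client you do not assemble the NDJSON body yourself; each header/body pair is expressed as a search item. A hedged sketch, with illustrative index names and queries and a hypothetical MyDoc result class:

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.MsearchResponse;
import java.util.concurrent.CompletableFuture;

class MsearchExample {
    public static class MyDoc {
        public String title;
    }

    // Two searches in one round trip; the client serializes them into the NDJSON request body.
    static CompletableFuture<MsearchResponse<MyDoc>> twoSearches(ElasticsearchAsyncClient client) {
        return client.msearch(m -> m
                .searches(s -> s
                    .header(h -> h.index("my-index-000001"))
                    .body(b -> b.query(q -> q.matchAll(ma -> ma))))
                .searches(s -> s
                    .header(h -> h.index("my-other-index"))
                    .body(b -> b.query(q -> q.term(t -> t.field("user.id").value("elkbee"))))),
            MyDoc.class);
    }
}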

* IMPORTANT: The final line of data must end with a newline character * \n. Each newline character may be preceded by a carriage return * \r. When sending requests to this endpoint the * Content-Type header should be set to * application/x-ndjson. * * @see Documentation * on elastic.co */ public CompletableFuture> msearch(MsearchRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MsearchRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.msearch.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run multiple searches. *

* The format of the request is similar to the bulk API format and makes use of * the newline delimited JSON (NDJSON) format. The structure is as follows: * *

	 * header\n
	 * body\n
	 * header\n
	 * body\n
	 * 
	 * 
*

* This structure is specifically optimized to reduce parsing if a specific * search ends up redirected to another node. *

* IMPORTANT: The final line of data must end with a newline character * \n. Each newline character may be preceded by a carriage return * \r. When sending requests to this endpoint the * Content-Type header should be set to * application/x-ndjson. * * @param fn * a function that initializes a builder to create the * {@link MsearchRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> msearch( Function> fn, Class tDocumentClass) { return msearch(fn.apply(new MsearchRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #msearch(MsearchRequest, Class)}, where Class is defined * as Void, meaning the documents will not be deserialized. */ public CompletableFuture> msearch(MsearchRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MsearchRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #msearch(Function, Class)}, where Class is defined as * Void, meaning the documents will not be deserialized. */ public final CompletableFuture> msearch( Function> fn) { return msearch(fn.apply(new MsearchRequest.Builder()).build(), Void.class); } /** * Run multiple searches. *

* The format of the request is similar to the bulk API format and makes use of * the newline delimited JSON (NDJSON) format. The structure is as follows: * *

	 * header\n
	 * body\n
	 * header\n
	 * body\n
	 * 
	 * 
*

* This structure is specifically optimized to reduce parsing if a specific * search ends up redirected to another node. *

* IMPORTANT: The final line of data must end with a newline character * \n. Each newline character may be preceded by a carriage return * \r. When sending requests to this endpoint the * Content-Type header should be set to * application/x-ndjson. * * @see Documentation * on elastic.co */ public CompletableFuture> msearch(MsearchRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MsearchRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.msearch.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run multiple searches. *

* The format of the request is similar to the bulk API format and makes use of * the newline delimited JSON (NDJSON) format. The structure is as follows: * *

	 * header\n
	 * body\n
	 * header\n
	 * body\n
	 * 
	 * 
*

* This structure is specifically optimized to reduce parsing if a specific * search ends up redirected to another node. *

* IMPORTANT: The final line of data must end with a newline character * \n. Each newline character may be preceded by a carriage return * \r. When sending requests to this endpoint the * Content-Type header should be set to * application/x-ndjson. * * @param fn * a function that initializes a builder to create the * {@link MsearchRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> msearch( Function> fn, Type tDocumentType) { return msearch(fn.apply(new MsearchRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: msearch_template /** * Run multiple templated searches. *

* Run multiple templated searches with a single request. If you are providing a * text file or text input to curl, use the * --data-binary flag instead of -d to preserve * newlines. For example: * *

	 * $ cat requests
	 * { "index": "my-index" }
	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
	 * { "index": "my-other-index" }
	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
	 *
	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
	 * 
	 * 
* * @see Documentation * on elastic.co */ public CompletableFuture> msearchTemplate( MsearchTemplateRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MsearchTemplateRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.msearch_template.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run multiple templated searches. *

* Run multiple templated searches with a single request. If you are providing a * text file or text input to curl, use the * --data-binary flag instead of -d to preserve * newlines. For example: * *

	 * $ cat requests
	 * { "index": "my-index" }
	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
	 * { "index": "my-other-index" }
	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
	 *
	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
	 * 
	 * 
* * @param fn * a function that initializes a builder to create the * {@link MsearchTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> msearchTemplate( Function> fn, Class tDocumentClass) { return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #msearchTemplate(MsearchTemplateRequest, Class)}, where * Class is defined as Void, meaning the documents will not be deserialized. */ public CompletableFuture> msearchTemplate(MsearchTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MsearchTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #msearchTemplate(Function, Class)}, where Class is defined * as Void, meaning the documents will not be deserialized. */ public final CompletableFuture> msearchTemplate( Function> fn) { return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), Void.class); } /** * Run multiple templated searches. *

* Run multiple templated searches with a single request. If you are providing a * text file or text input to curl, use the * --data-binary flag instead of -d to preserve * newlines. For example: * *

	 * $ cat requests
	 * { "index": "my-index" }
	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
	 * { "index": "my-other-index" }
	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
	 *
	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
	 * 
	 * 
* * @see Documentation * on elastic.co */ public CompletableFuture> msearchTemplate( MsearchTemplateRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) MsearchTemplateRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.msearch_template.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run multiple templated searches. *

* Run multiple templated searches with a single request. If you are providing a * text file or text input to curl, use the * --data-binary flag instead of -d to preserve * newlines. For example: * *

	 * $ cat requests
	 * { "index": "my-index" }
	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
	 * { "index": "my-other-index" }
	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
	 *
	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
	 * 
	 * 
* * @param fn * a function that initializes a builder to create the * {@link MsearchTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> msearchTemplate( Function> fn, Type tDocumentType) { return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: mtermvectors /** * Get multiple term vectors. *

* Get multiple term vectors with a single request. You can specify existing * documents by index and ID or provide artificial documents in the body of the * request. You can specify the index in the request body or request URI. The * response contains a docs array with all the fetched termvectors. * Each element has the structure provided by the termvectors API. *
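A minimal sketch of fetching term vectors for two existing documents by ID (index name and IDs are illustrative assumptions):

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.MtermvectorsResponse;
import java.util.concurrent.CompletableFuture;

class MtermvectorsExample {
    // The response contains a docs array, one element per requested document.
    static CompletableFuture<MtermvectorsResponse> termVectorsForTwoDocs(ElasticsearchAsyncClient client) {
        return client.mtermvectors(m -> m
                .index("my-index-000001")
                .ids("1", "2"));
    }
}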

* Artificial documents *

* You can also use mtermvectors to generate term vectors for * artificial documents provided in the body of the request. The mapping used is * determined by the specified _index. * * @see Documentation * on elastic.co */ public CompletableFuture mtermvectors(MtermvectorsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) MtermvectorsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get multiple term vectors. *

* Get multiple term vectors with a single request. You can specify existing * documents by index and ID or provide artificial documents in the body of the * request. You can specify the index in the request body or request URI. The * response contains a docs array with all the fetched termvectors. * Each element has the structure provided by the termvectors API. *

* Artificial documents *

* You can also use mtermvectors to generate term vectors for * artificial documents provided in the body of the request. The mapping used is * determined by the specified _index. * * @param fn * a function that initializes a builder to create the * {@link MtermvectorsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture mtermvectors( Function> fn) { return mtermvectors(fn.apply(new MtermvectorsRequest.Builder()).build()); } /** * Get multiple term vectors. *

* Get multiple term vectors with a single request. You can specify existing * documents by index and ID or provide artificial documents in the body of the * request. You can specify the index in the request body or request URI. The * response contains a docs array with all the fetched termvectors. * Each element has the structure provided by the termvectors API. *

* Artificial documents *

* You can also use mtermvectors to generate term vectors for * artificial documents provided in the body of the request. The mapping used is * determined by the specified _index. * * @see Documentation * on elastic.co */ public CompletableFuture mtermvectors() { return this.transport.performRequestAsync(new MtermvectorsRequest.Builder().build(), MtermvectorsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: open_point_in_time /** * Open a point in time. *

* A search request by default runs against the most recent visible data of the * target indices, which is called point in time. Elasticsearch pit (point in * time) is a lightweight view into the state of the data as it existed when * initiated. In some cases, it’s preferred to perform multiple search requests * using the same point in time. For example, if refreshes happen between * search_after requests, then the results of those requests might * not be consistent as changes happening between searches are only visible to * the more recent point in time. *

* A point in time must be opened explicitly before being used in search * requests. *

* A subsequent search request with the pit parameter must not * specify index, routing, or preference * values as these parameters are copied from the point in time. *

* Just like regular searches, you can use from and * size to page through point in time search results, up to the * first 10,000 hits. If you want to retrieve more hits, use PIT with * search_after. *
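A hedged sketch of the open-then-search flow with this client; the index name, keep-alive, and the MyDoc result class are assumptions, and error handling is omitted:

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import java.util.concurrent.CompletableFuture;

class PointInTimeExample {
    public static class MyDoc {
        public String title;
    }

    // Open a PIT, then run a search that targets it; keep_alive only needs to cover the next request.
    static CompletableFuture<SearchResponse<MyDoc>> searchWithPit(ElasticsearchAsyncClient client) {
        return client
            .openPointInTime(p -> p.index("my-index-000001").keepAlive(k -> k.time("1m")))
            .thenCompose(open -> client.search(s -> s
                    .pit(pit -> pit.id(open.id()).keepAlive(k -> k.time("1m")))
                    .query(q -> q.matchAll(ma -> ma)),
                MyDoc.class));
    }
}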

* IMPORTANT: The open point in time request and each subsequent search request * can return different identifiers; always use the most recently received ID * for the next search request. *

* When a PIT that contains shard failures is used in a search request, the * missing shards are always reported in the search response as a * NoShardAvailableActionException exception. To get rid of these * exceptions, a new PIT needs to be created so that shards missing from the * previous PIT can be handled, assuming they become available in the meantime. *

* Keeping point in time alive *

* The keep_alive parameter, which is passed to an open point in * time request and search request, extends the time to live of the * corresponding point in time. The value does not need to be long enough to * process all data — it just needs to be long enough for the next request. *

* Normally, the background merge process optimizes the index by merging * together smaller segments to create new, bigger segments. Once the smaller * segments are no longer needed they are deleted. However, open point-in-times * prevent the old segments from being deleted since they are still in use. *

* TIP: Keeping older segments alive means that more disk space and file handles * are needed. Ensure that you have configured your nodes to have ample free * file handles. *

* Additionally, if a segment contains deleted or updated documents then the * point in time must keep track of whether each document in the segment was * live at the time of the initial search request. Ensure that your nodes have * sufficient heap space if you have many open point-in-times on an index that * is subject to ongoing deletes or updates. Note that a point-in-time doesn't * prevent its associated indices from being deleted. You can check how many * point-in-times (that is, search contexts) are open with the nodes stats API. * * @see Documentation * on elastic.co */ public CompletableFuture openPointInTime(OpenPointInTimeRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) OpenPointInTimeRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Open a point in time. *

* A search request by default runs against the most recent visible data of the * target indices, which is called point in time. Elasticsearch pit (point in * time) is a lightweight view into the state of the data as it existed when * initiated. In some cases, it’s preferred to perform multiple search requests * using the same point in time. For example, if refreshes happen between * search_after requests, then the results of those requests might * not be consistent as changes happening between searches are only visible to * the more recent point in time. *

* A point in time must be opened explicitly before being used in search * requests. *

* A subsequent search request with the pit parameter must not * specify index, routing, or preference * values as these parameters are copied from the point in time. *

* Just like regular searches, you can use from and * size to page through point in time search results, up to the * first 10,000 hits. If you want to retrieve more hits, use PIT with * search_after. *

* IMPORTANT: The open point in time request and each subsequent search request * can return different identifiers; always use the most recently received ID * for the next search request. *

* When a PIT that contains shard failures is used in a search request, the * missing shards are always reported in the search response as a * NoShardAvailableActionException exception. To get rid of these * exceptions, a new PIT needs to be created so that shards missing from the * previous PIT can be handled, assuming they become available in the meantime. *

* Keeping point in time alive *

* The keep_alive parameter, which is passed to an open point in * time request and search request, extends the time to live of the * corresponding point in time. The value does not need to be long enough to * process all data — it just needs to be long enough for the next request. *

* Normally, the background merge process optimizes the index by merging * together smaller segments to create new, bigger segments. Once the smaller * segments are no longer needed they are deleted. However, open point-in-times * prevent the old segments from being deleted since they are still in use. *

* TIP: Keeping older segments alive means that more disk space and file handles * are needed. Ensure that you have configured your nodes to have ample free * file handles. *

* Additionally, if a segment contains deleted or updated documents then the * point in time must keep track of whether each document in the segment was * live at the time of the initial search request. Ensure that your nodes have * sufficient heap space if you have many open point-in-times on an index that * is subject to ongoing deletes or updates. Note that a point-in-time doesn't * prevent its associated indices from being deleted. You can check how many * point-in-times (that is, search contexts) are open with the nodes stats API. * * @param fn * a function that initializes a builder to create the * {@link OpenPointInTimeRequest} * @see Documentation * on elastic.co */ public final CompletableFuture openPointInTime( Function> fn) { return openPointInTime(fn.apply(new OpenPointInTimeRequest.Builder()).build()); } // ----- Endpoint: ping /** * Ping the cluster. Get information about whether the cluster is running. * * @see Documentation * on elastic.co */ public CompletableFuture ping() { return this.transport.performRequestAsync(PingRequest._INSTANCE, PingRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: put_script /** * Create or update a script or search template. Creates or updates a stored * script or search template. * * @see Documentation * on elastic.co */ public CompletableFuture putScript(PutScriptRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutScriptRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Create or update a script or search template. Creates or updates a stored * script or search template. * * @param fn * a function that initializes a builder to create the * {@link PutScriptRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putScript( Function> fn) { return putScript(fn.apply(new PutScriptRequest.Builder()).build()); } // ----- Endpoint: rank_eval /** * Evaluate ranked search results. *

* Evaluate the quality of ranked search results over a set of typical search * queries. * * @see Documentation * on elastic.co */ public CompletableFuture rankEval(RankEvalRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) RankEvalRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Evaluate ranked search results. *

* Evaluate the quality of ranked search results over a set of typical search * queries. * * @param fn * a function that initializes a builder to create the * {@link RankEvalRequest} * @see Documentation * on elastic.co */ public final CompletableFuture rankEval( Function> fn) { return rankEval(fn.apply(new RankEvalRequest.Builder()).build()); } // ----- Endpoint: reindex /** * Reindex documents. *

* Copy documents from a source to a destination. You can copy all documents to * the destination index or reindex a subset of the documents. The source can be * any existing index, alias, or data stream. The destination must differ from * the source. For example, you cannot reindex a data stream into itself. *

* IMPORTANT: Reindex requires _source to be enabled for all * documents in the source. The destination should be configured as wanted * before calling the reindex API. Reindex does not copy the settings from the * source or its associated template. Mappings, shard counts, and replicas, for * example, must be configured ahead of time. *

* If the Elasticsearch security features are enabled, you must have the * following security privileges: *

  • The read index privilege for the source data stream, index, or alias.
  • The write index privilege for the destination data stream, index, or index alias.
  • To automatically create a data stream or index with a reindex API request, you must have the auto_configure, create_index, or manage index privilege for the destination data stream, index, or alias.
  • If reindexing from a remote cluster, the source.remote.user must have the monitor cluster privilege and the read index privilege for the source data stream, index, or alias.

* If reindexing from a remote cluster, you must explicitly allow the remote * host in the reindex.remote.whitelist setting. Automatic data * stream creation requires a matching index template with data stream enabled. *

* The dest element can be configured like the index API to control * optimistic concurrency control. Omitting version_type or setting * it to internal causes Elasticsearch to blindly dump documents * into the destination, overwriting any that happen to have the same ID. *

* Setting version_type to external causes * Elasticsearch to preserve the version from the source, create * any documents that are missing, and update any documents that have an older * version in the destination than they do in the source. *

* Setting op_type to create causes the reindex API to * create only missing documents in the destination. All existing documents will * cause a version conflict. *

* IMPORTANT: Because data streams are append-only, any reindex request to a * destination data stream must have an op_type of * create. A reindex can only add new documents to a destination * data stream. It cannot update existing documents in a destination data * stream. *

* By default, version conflicts abort the reindex process. To continue * reindexing if there are conflicts, set the conflicts request * body property to proceed. In this case, the response includes a * count of the version conflicts that were encountered. Note that the handling * of other error types is unaffected by the conflicts property. * Additionally, if you opt to count version conflicts, the operation could * attempt to reindex more documents from the source than max_docs * until it has successfully indexed max_docs documents into the * target or it has gone through every document in the source query. *
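A minimal sketch of a reindex call that continues past version conflicts; the source and destination index names are illustrative, and the destination is assumed to be configured ahead of time as described above:

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch._types.Conflicts;
import co.elastic.clients.elasticsearch.core.ReindexResponse;
import java.util.concurrent.CompletableFuture;

class ReindexExample {
    // Copy documents into a pre-configured destination, counting version conflicts instead of aborting.
    static CompletableFuture<ReindexResponse> reindexWithConflictsProceed(ElasticsearchAsyncClient client) {
        return client.reindex(r -> r
                .source(s -> s.index("my-index-000001"))
                .dest(d -> d.index("my-new-index-000001"))
                .conflicts(Conflicts.Proceed));
    }
}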

* Refer to the linked documentation for examples of how to reindex documents. * * @see Documentation * on elastic.co */ public CompletableFuture reindex(ReindexRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ReindexRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Reindex documents. *

* Copy documents from a source to a destination. You can copy all documents to * the destination index or reindex a subset of the documents. The source can be * any existing index, alias, or data stream. The destination must differ from * the source. For example, you cannot reindex a data stream into itself. *

* IMPORTANT: Reindex requires _source to be enabled for all * documents in the source. The destination should be configured as wanted * before calling the reindex API. Reindex does not copy the settings from the * source or its associated template. Mappings, shard counts, and replicas, for * example, must be configured ahead of time. *

* If the Elasticsearch security features are enabled, you must have the * following security privileges: *

  • The read index privilege for the source data stream, index, or alias.
  • The write index privilege for the destination data stream, index, or index alias.
  • To automatically create a data stream or index with a reindex API request, you must have the auto_configure, create_index, or manage index privilege for the destination data stream, index, or alias.
  • If reindexing from a remote cluster, the source.remote.user must have the monitor cluster privilege and the read index privilege for the source data stream, index, or alias.

* If reindexing from a remote cluster, you must explicitly allow the remote * host in the reindex.remote.whitelist setting. Automatic data * stream creation requires a matching index template with data stream enabled. *

* The dest element can be configured like the index API to control * optimistic concurrency control. Omitting version_type or setting * it to internal causes Elasticsearch to blindly dump documents * into the destination, overwriting any that happen to have the same ID. *

* Setting version_type to external causes * Elasticsearch to preserve the version from the source, create * any documents that are missing, and update any documents that have an older * version in the destination than they do in the source. *

* Setting op_type to create causes the reindex API to * create only missing documents in the destination. All existing documents will * cause a version conflict. *

* IMPORTANT: Because data streams are append-only, any reindex request to a * destination data stream must have an op_type of * create. A reindex can only add new documents to a destination * data stream. It cannot update existing documents in a destination data * stream. *

* By default, version conflicts abort the reindex process. To continue * reindexing if there are conflicts, set the conflicts request * body property to proceed. In this case, the response includes a * count of the version conflicts that were encountered. Note that the handling * of other error types is unaffected by the conflicts property. * Additionally, if you opt to count version conflicts, the operation could * attempt to reindex more documents from the source than max_docs * until it has successfully indexed max_docs documents into the * target or it has gone through every document in the source query. *

* Refer to the linked documentation for examples of how to reindex documents. * * @param fn * a function that initializes a builder to create the * {@link ReindexRequest} * @see Documentation * on elastic.co */ public final CompletableFuture reindex( Function> fn) { return reindex(fn.apply(new ReindexRequest.Builder()).build()); } // ----- Endpoint: reindex_rethrottle /** * Throttle a reindex operation. *

* Change the number of requests per second for a particular reindex operation. * For example: * *

	 * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
	 * 
	 * 
*

* Rethrottling that speeds up the query takes effect immediately. Rethrottling * that slows down the query will take effect after completing the current * batch. This behavior prevents scroll timeouts. * * @see Documentation * on elastic.co */ public CompletableFuture reindexRethrottle(ReindexRethrottleRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ReindexRethrottleRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Throttle a reindex operation. *

* Change the number of requests per second for a particular reindex operation. * For example: * *

	 * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
	 * 
	 * 
*

* Rethrottling that speeds up the query takes effect immediately. Rethrottling * that slows down the query will take effect after completing the current * batch. This behavior prevents scroll timeouts. * * @param fn * a function that initializes a builder to create the * {@link ReindexRethrottleRequest} * @see Documentation * on elastic.co */ public final CompletableFuture reindexRethrottle( Function> fn) { return reindexRethrottle(fn.apply(new ReindexRethrottleRequest.Builder()).build()); } // ----- Endpoint: render_search_template /** * Render a search template. *

* Render a search template as a search request body. * * @see Documentation * on elastic.co */ public CompletableFuture renderSearchTemplate(RenderSearchTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) RenderSearchTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Render a search template. *

* Render a search template as a search request body. * * @param fn * a function that initializes a builder to create the * {@link RenderSearchTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture renderSearchTemplate( Function> fn) { return renderSearchTemplate(fn.apply(new RenderSearchTemplateRequest.Builder()).build()); } /** * Render a search template. *

* Render a search template as a search request body. * * @see Documentation * on elastic.co */ public CompletableFuture renderSearchTemplate() { return this.transport.performRequestAsync(new RenderSearchTemplateRequest.Builder().build(), RenderSearchTemplateRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: scripts_painless_execute /** * Run a script. *

* Runs a script and returns a result. Use this API to build and test scripts, * such as when defining a script for a runtime field. This API requires very * few dependencies and is especially useful if you don't have permissions to * write documents on a cluster. *

* The API uses several contexts, which control how scripts are run, * what variables are available at runtime, and what the return type is. *

* Each context requires a script, but additional parameters depend on the * context you're using for that script. * * @see Documentation * on elastic.co */ public CompletableFuture> scriptsPainlessExecute( ScriptsPainlessExecuteRequest request, Class tResultClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ScriptsPainlessExecuteRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.scripts_painless_execute.Response.TResult", getDeserializer(tResultClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a script. *

* Runs a script and returns a result. Use this API to build and test scripts, * such as when defining a script for a runtime field. This API requires very * few dependencies and is especially useful if you don't have permissions to * write documents on a cluster. *

* The API uses several contexts, which control how scripts are run, * what variables are available at runtime, and what the return type is. *

* Each context requires a script, but additional parameters depend on the * context you're using for that script. * * @param fn * a function that initializes a builder to create the * {@link ScriptsPainlessExecuteRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> scriptsPainlessExecute( Function> fn, Class tResultClass) { return scriptsPainlessExecute(fn.apply(new ScriptsPainlessExecuteRequest.Builder()).build(), tResultClass); } /** * Overload of * {@link #scriptsPainlessExecute(ScriptsPainlessExecuteRequest, Class)}, where * Class is defined as Void, meaning the documents will not be deserialized. */ public CompletableFuture> scriptsPainlessExecute( ScriptsPainlessExecuteRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ScriptsPainlessExecuteRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #scriptsPainlessExecute(Function, Class)}, where Class is * defined as Void, meaning the documents will not be deserialized. */ public final CompletableFuture> scriptsPainlessExecute( Function> fn) { return scriptsPainlessExecute(fn.apply(new ScriptsPainlessExecuteRequest.Builder()).build(), Void.class); } /** * Run a script. *

* Runs a script and returns a result. Use this API to build and test scripts, * such as when defining a script for a runtime field. This API requires very * few dependencies and is especially useful if you don't have permissions to * write documents on a cluster. *

* The API uses several contexts, which control how scripts are run, * what variables are available at runtime, and what the return type is. *

* Each context requires a script, but additional parameters depend on the * context you're using for that script. * * @see Documentation * on elastic.co */ public CompletableFuture> scriptsPainlessExecute( ScriptsPainlessExecuteRequest request, Type tResultType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ScriptsPainlessExecuteRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.scripts_painless_execute.Response.TResult", getDeserializer(tResultType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a script. *

* Runs a script and returns a result. Use this API to build and test scripts, * such as when defining a script for a runtime field. This API requires very * few dependencies and is especially useful if you don't have permissions to * write documents on a cluster. *

* The API uses several contexts, which control how scripts are run, * what variables are available at runtime, and what the return type is. *

* Each context requires a script, but additional parameters depend on the * context you're using for that script. * * @param fn * a function that initializes a builder to create the * {@link ScriptsPainlessExecuteRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> scriptsPainlessExecute( Function> fn, Type tResultType) { return scriptsPainlessExecute(fn.apply(new ScriptsPainlessExecuteRequest.Builder()).build(), tResultType); } // ----- Endpoint: scroll /** * Run a scrolling search. *

* IMPORTANT: The scroll API is no longer recommended for deep pagination. If you * need to preserve the index state while paging through more than 10,000 hits, * use the search_after parameter with a point in time (PIT). *

* The scroll API gets large sets of results from a single scrolling search * request. To get the necessary scroll ID, submit a search API request that * includes an argument for the scroll query parameter. The * scroll parameter indicates how long Elasticsearch should retain * the search context for the request. The search response returns a scroll ID * in the _scroll_id response body parameter. You can then use the * scroll ID with the scroll API to retrieve the next batch of results for the * request. If the Elasticsearch security features are enabled, the access to * the results of a specific scroll ID is restricted to the user or API key that * submitted the search. *
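A hedged sketch of that flow with this client: run the initial search with a scroll timeout, then pass the returned scroll ID to the scroll API for the next batch. The index name, keep-alive, and MyDoc class are assumptions, and clearing the scroll afterwards is omitted:

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import co.elastic.clients.elasticsearch.core.ScrollResponse;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import java.util.concurrent.CompletableFuture;

class ScrollExample {
    public static class MyDoc {
        public String title;
    }

    // Start a scrolling search, then fetch the second batch with the returned scroll ID.
    static CompletableFuture<ScrollResponse<MyDoc>> firstTwoBatches(ElasticsearchAsyncClient client) {
        CompletableFuture<SearchResponse<MyDoc>> first = client.search(s -> s
                .index("my-index-000001")
                .scroll(t -> t.time("1m"))          // how long to retain the search context
                .query(q -> q.matchAll(ma -> ma)),
            MyDoc.class);
        return first.thenCompose(resp -> client.scroll(sc -> sc
                .scrollId(resp.scrollId())
                .scroll(t -> t.time("1m")),
            MyDoc.class));
    }
}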

* You can also use the scroll API to specify a new scroll parameter that * extends or shortens the retention period for the search context. *

* IMPORTANT: Results from a scrolling search reflect the state of the index at * the time of the initial search request. Subsequent indexing or document * changes only affect later search and scroll requests. * * @see Documentation * on elastic.co */ public CompletableFuture> scroll(ScrollRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ScrollRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.scroll.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a scrolling search. *

* IMPORTANT: The scroll API is no longer recommended for deep pagination. If you * need to preserve the index state while paging through more than 10,000 hits, * use the search_after parameter with a point in time (PIT). *

* The scroll API gets large sets of results from a single scrolling search * request. To get the necessary scroll ID, submit a search API request that * includes an argument for the scroll query parameter. The * scroll parameter indicates how long Elasticsearch should retain * the search context for the request. The search response returns a scroll ID * in the _scroll_id response body parameter. You can then use the * scroll ID with the scroll API to retrieve the next batch of results for the * request. If the Elasticsearch security features are enabled, the access to * the results of a specific scroll ID is restricted to the user or API key that * submitted the search. *

* You can also use the scroll API to specify a new scroll parameter that * extends or shortens the retention period for the search context. *

* IMPORTANT: Results from a scrolling search reflect the state of the index at * the time of the initial search request. Subsequent indexing or document * changes only affect later search and scroll requests. * * @param fn * a function that initializes a builder to create the * {@link ScrollRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> scroll( Function> fn, Class tDocumentClass) { return scroll(fn.apply(new ScrollRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #scroll(ScrollRequest, Class)}, where Class is defined as * Void, meaning the documents will not be deserialized. */ public CompletableFuture> scroll(ScrollRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ScrollRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #scroll(Function, Class)}, where Class is defined as Void, * meaning the documents will not be deserialized. */ public final CompletableFuture> scroll( Function> fn) { return scroll(fn.apply(new ScrollRequest.Builder()).build(), Void.class); } /** * Run a scrolling search. *

* IMPORTANT: The scroll API is no longer recommended for deep pagination. If you * need to preserve the index state while paging through more than 10,000 hits, * use the search_after parameter with a point in time (PIT). *

* The scroll API gets large sets of results from a single scrolling search * request. To get the necessary scroll ID, submit a search API request that * includes an argument for the scroll query parameter. The * scroll parameter indicates how long Elasticsearch should retain * the search context for the request. The search response returns a scroll ID * in the _scroll_id response body parameter. You can then use the * scroll ID with the scroll API to retrieve the next batch of results for the * request. If the Elasticsearch security features are enabled, the access to * the results of a specific scroll ID is restricted to the user or API key that * submitted the search. *

* You can also use the scroll API to specify a new scroll parameter that * extends or shortens the retention period for the search context. *

* IMPORTANT: Results from a scrolling search reflect the state of the index at * the time of the initial search request. Subsequent indexing or document * changes only affect later search and scroll requests. * * @see Documentation * on elastic.co */ public CompletableFuture> scroll(ScrollRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) ScrollRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.scroll.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a scrolling search. *

* IMPORTANT: The scroll API is no longer recommended for deep pagination. If you * need to preserve the index state while paging through more than 10,000 hits, * use the search_after parameter with a point in time (PIT). *

* The scroll API gets large sets of results from a single scrolling search * request. To get the necessary scroll ID, submit a search API request that * includes an argument for the scroll query parameter. The * scroll parameter indicates how long Elasticsearch should retain * the search context for the request. The search response returns a scroll ID * in the _scroll_id response body parameter. You can then use the * scroll ID with the scroll API to retrieve the next batch of results for the * request. If the Elasticsearch security features are enabled, the access to * the results of a specific scroll ID is restricted to the user or API key that * submitted the search. *

* You can also use the scroll API to specify a new scroll parameter that * extends or shortens the retention period for the search context. *

* IMPORTANT: Results from a scrolling search reflect the state of the index at * the time of the initial search request. Subsequent indexing or document * changes only affect later search and scroll requests. * * @param fn * a function that initializes a builder to create the * {@link ScrollRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> scroll( Function> fn, Type tDocumentType) { return scroll(fn.apply(new ScrollRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: search /** * Run a search. *

* Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. *

* If the Elasticsearch security features are enabled, you must have the read * index privilege for the target data stream, index, or alias. For * cross-cluster search, refer to the documentation about configuring CCS * privileges. To search a point in time (PIT) for an alias, you must have the * read index privilege for the alias's data streams or indices. *
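* A minimal asynchronous usage sketch (not part of the original Javadoc); the client variable, index name, and Product document class are assumptions. *

	 * client.search(s -> s
	 *         .index("products")
	 *         .query(q -> q.match(m -> m.field("name").query("laptop"))),
	 *         Product.class)
	 *     .thenAccept(resp -> resp.hits().hits()
	 *         .forEach(hit -> System.out.println(hit.id() + " -> " + hit.source())));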

* Search slicing *

* When paging through a large number of documents, it can be helpful to split * the search into multiple slices to consume them independently with the * slice and pit properties. By default the splitting * is done first on the shards, then locally on each shard. The local splitting * partitions the shard into contiguous ranges based on Lucene document IDs. *

* For instance if the number of shards is equal to 2 and you request 4 slices, * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are * assigned to the second shard. *
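* As a sketch of the slicing described above (not part of the original Javadoc; the index name, slice id type, and Product class are assumptions), a point in time is opened first and each worker then requests its own slice against the same PIT ID: *

	 * client.openPointInTime(p -> p.index("my-index-000001").keepAlive(t -> t.time("1m")))
	 *     .thenCompose(pit -> client.search(s -> s
	 *         .slice(sl -> sl.id("0").max(4))                      // this worker processes slice 0 of 4
	 *         .pit(p -> p.id(pit.id()).keepAlive(t -> t.time("1m")))
	 *         .query(q -> q.matchAll(m -> m)),
	 *         Product.class));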

* IMPORTANT: The same point-in-time ID should be used for all slices. If * different PIT IDs are used, slices can overlap and miss documents. This * situation can occur because the splitting criterion is based on Lucene * document IDs, which are not stable across changes to the index. * * @see Documentation * on elastic.co */ public CompletableFuture> search(SearchRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) SearchRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.search.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a search. *

* Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. *

* If the Elasticsearch security features are enabled, you must have the read * index privilege for the target data stream, index, or alias. For * cross-cluster search, refer to the documentation about configuring CCS * privileges. To search a point in time (PIT) for an alias, you must have the * read index privilege for the alias's data streams or indices. *

* Search slicing *

* When paging through a large number of documents, it can be helpful to split * the search into multiple slices to consume them independently with the * slice and pit properties. By default the splitting * is done first on the shards, then locally on each shard. The local splitting * partitions the shard into contiguous ranges based on Lucene document IDs. *

* For instance if the number of shards is equal to 2 and you request 4 slices, * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are * assigned to the second shard. *

* IMPORTANT: The same point-in-time ID should be used for all slices. If * different PIT IDs are used, slices can overlap and miss documents. This * situation can occur because the splitting criterion is based on Lucene * document IDs, which are not stable across changes to the index. * * @param fn * a function that initializes a builder to create the * {@link SearchRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> search( Function> fn, Class tDocumentClass) { return search(fn.apply(new SearchRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #search(SearchRequest, Class)}, where Class is defined as * Void, meaning the documents will not be deserialized. */ public CompletableFuture> search(SearchRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) SearchRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #search(Function, Class)}, where Class is defined as Void, * meaning the documents will not be deserialized. */ public final CompletableFuture> search( Function> fn) { return search(fn.apply(new SearchRequest.Builder()).build(), Void.class); } /** * Run a search. *

* Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. *

* If the Elasticsearch security features are enabled, you must have the read * index privilege for the target data stream, index, or alias. For * cross-cluster search, refer to the documentation about configuring CCS * privileges. To search a point in time (PIT) for an alias, you must have the * read index privilege for the alias's data streams or indices. *

* Search slicing *

* When paging through a large number of documents, it can be helpful to split * the search into multiple slices to consume them independently with the * slice and pit properties. By default the splitting * is done first on the shards, then locally on each shard. The local splitting * partitions the shard into contiguous ranges based on Lucene document IDs. *

* For instance if the number of shards is equal to 2 and you request 4 slices, * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are * assigned to the second shard. *

* IMPORTANT: The same point-in-time ID should be used for all slices. If * different PIT IDs are used, slices can overlap and miss documents. This * situation can occur because the splitting criterion is based on Lucene * document IDs, which are not stable across changes to the index. * * @see Documentation * on elastic.co */ public CompletableFuture> search(SearchRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) SearchRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.search.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a search. *

* Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. *

* If the Elasticsearch security features are enabled, you must have the read * index privilege for the target data stream, index, or alias. For * cross-cluster search, refer to the documentation about configuring CCS * privileges. To search a point in time (PIT) for an alias, you must have the * read index privilege for the alias's data streams or indices. *

* Search slicing *

* When paging through a large number of documents, it can be helpful to split * the search into multiple slices to consume them independently with the * slice and pit properties. By default the splitting * is done first on the shards, then locally on each shard. The local splitting * partitions the shard into contiguous ranges based on Lucene document IDs. *

* For instance if the number of shards is equal to 2 and you request 4 slices, * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are * assigned to the second shard. *

* IMPORTANT: The same point-in-time ID should be used for all slices. If * different PIT IDs are used, slices can overlap and miss documents. This * situation can occur because the splitting criterion is based on Lucene * document IDs, which are not stable across changes to the index. * * @param fn * a function that initializes a builder to create the * {@link SearchRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> search( Function> fn, Type tDocumentType) { return search(fn.apply(new SearchRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: search_mvt /** * Search a vector tile. *

* Search a vector tile for geospatial values. Before using this API, you should * be familiar with the Mapbox vector tile specification. The API returns * results as a binary mapbox vector tile. *
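* A minimal asynchronous usage sketch (not part of the original Javadoc); the index, geo field, and tile coordinates are assumptions, and BinaryResponse comes from the transport endpoints package. *

	 * CompletableFuture<BinaryResponse> tile = client.searchMvt(m -> m
	 *         .index("museums")
	 *         .field("location")
	 *         .zoom(13)
	 *         .x(4207)
	 *         .y(2692));
	 * tile.thenAccept(resp -> {
	 *     try (java.io.InputStream pbf = resp.content()) {
	 *         // pbf holds the protobuf-encoded tile, ready to be written to disk or served to a map client
	 *     } catch (java.io.IOException e) {
	 *         e.printStackTrace();
	 *     }
	 * });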

* Internally, Elasticsearch translates a vector tile search API request into a * search containing: *

    *
  • A geo_bounding_box query on the <field>. * The query uses the <zoom>/<x>/<y> tile as a * bounding box.
  • *
  • A geotile_grid or geohex_grid aggregation on * the <field>. The grid_agg parameter * determines the aggregation type. The aggregation uses the * <zoom>/<x>/<y> tile as a bounding box.
  • *
  • Optionally, a geo_bounds aggregation on the * <field>. The search only includes this aggregation if the * exact_bounds parameter is true.
  • *
  • If the optional parameter with_labels is true, * the internal search will include a dynamic runtime field that calls the * getLabelPosition function of the geometry doc value. This * enables the generation of new point features containing suggested geometry * labels, so that, for example, multi-polygons will have only one label.
  • *
*

* For example, Elasticsearch may translate a vector tile search API request * with a grid_agg argument of geotile and an * exact_bounds argument of true into the following * search * *

	 * GET my-index/_search
	 * {
	 *   "size": 10000,
	 *   "query": {
	 *     "geo_bounding_box": {
	 *       "my-geo-field": {
	 *         "top_left": {
	 *           "lat": -40.979898069620134,
	 *           "lon": -45
	 *         },
	 *         "bottom_right": {
	 *           "lat": -66.51326044311186,
	 *           "lon": 0
	 *         }
	 *       }
	 *     }
	 *   },
	 *   "aggregations": {
	 *     "grid": {
	 *       "geotile_grid": {
	 *         "field": "my-geo-field",
	 *         "precision": 11,
	 *         "size": 65536,
	 *         "bounds": {
	 *           "top_left": {
	 *             "lat": -40.979898069620134,
	 *             "lon": -45
	 *           },
	 *           "bottom_right": {
	 *             "lat": -66.51326044311186,
	 *             "lon": 0
	 *           }
	 *         }
	 *       }
	 *     },
	 *     "bounds": {
	 *       "geo_bounds": {
	 *         "field": "my-geo-field",
	 *         "wrap_longitude": false
	 *       }
	 *     }
	 *   }
	 * }
	 * 
	 * 
*

* The API returns results as a binary Mapbox vector tile. Mapbox vector tiles * are encoded as Google Protobufs (PBF). By default, the tile contains three * layers: *

    *
  • A hits layer containing a feature for each * <field> value matching the geo_bounding_box * query.
  • *
  • An aggs layer containing a feature for each cell of the * geotile_grid or geohex_grid. The layer only * contains features for cells with matching data.
  • *
  • A meta layer containing: *
      *
    • A feature containing a bounding box. By default, this is the bounding box * of the tile.
    • *
    • Value ranges for any sub-aggregations on the geotile_grid or * geohex_grid.
    • *
    • Metadata for the search.
    • *
    *
  • *
*

* The API only returns features that can display at its zoom level. For * example, if a polygon feature has no area at its zoom level, the API omits * it. The API returns errors as UTF-8 encoded JSON. *

* IMPORTANT: You can specify several options for this API as either a query * parameter or request body parameter. If you specify both parameters, the * query parameter takes precedence. *

* Grid precision for geotile *

* For a grid_agg of geotile, you can use cells in the * aggs layer as tiles for lower zoom levels. * grid_precision represents the additional zoom levels available * through these cells. The final precision is computed as follows: * <zoom> + grid_precision. For example, if * <zoom> is 7 and grid_precision is 8, then the * geotile_grid aggregation will use a precision of 15. The maximum * final precision is 29. The grid_precision also determines the * number of cells for the grid as follows: * (2^grid_precision) x (2^grid_precision). For example, a value of * 8 divides the tile into a grid of 256 x 256 cells. The aggs * layer only contains features for cells with matching data. *

* Grid precision for geohex *

* For a grid_agg of geohex, Elasticsearch uses * <zoom> and grid_precision to calculate a * final precision as follows: <zoom> + grid_precision. *

* This precision determines the H3 resolution of the hexagonal cells produced * by the geohex aggregation. The following table maps the H3 * resolution for each precision. For example, if <zoom> is 3 * and grid_precision is 3, the precision is 6. At a precision of * 6, hexagonal cells have an H3 resolution of 2. If <zoom> * is 3 and grid_precision is 4, the precision is 7. At a precision * of 7, hexagonal cells have an H3 resolution of 3. *

* Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio
* 1 | 4 | 0 | 122 | 30.5
* 2 | 16 | 0 | 122 | 7.625
* 3 | 64 | 1 | 842 | 13.15625
* 4 | 256 | 1 | 842 | 3.2890625
* 5 | 1024 | 2 | 5882 | 5.744140625
* 6 | 4096 | 2 | 5882 | 1.436035156
* 7 | 16384 | 3 | 41162 | 2.512329102
* 8 | 65536 | 3 | 41162 | 0.6280822754
* 9 | 262144 | 4 | 288122 | 1.099098206
* 10 | 1048576 | 4 | 288122 | 0.2747745514
* 11 | 4194304 | 5 | 2016842 | 0.4808526039
* 12 | 16777216 | 6 | 14117882 | 0.8414913416
* 13 | 67108864 | 6 | 14117882 | 0.2103728354
* 14 | 268435456 | 7 | 98825162 | 0.3681524172
* 15 | 1073741824 | 8 | 691776122 | 0.644266719
* 16 | 4294967296 | 8 | 691776122 | 0.1610666797
* 17 | 17179869184 | 9 | 4842432842 | 0.2818666889
* 18 | 68719476736 | 10 | 33897029882 | 0.4932667053
* 19 | 274877906944 | 11 | 237279209162 | 0.8632167343
* 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836
* 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213
* 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122
* 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078
* 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866
* 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015
* 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504
* 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259
* 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149
* 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537
*

* Hexagonal cells don't align perfectly on a vector tile. Some cells may * intersect more than one vector tile. To compute the H3 resolution for each * precision, Elasticsearch compares the average density of hexagonal bins at * each resolution with the average density of tile bins at each zoom level. * Elasticsearch uses the H3 resolution that is closest to the corresponding * geotile density. * * @see Documentation * on elastic.co */ public CompletableFuture searchMvt(SearchMvtRequest request) { @SuppressWarnings("unchecked") Endpoint endpoint = (Endpoint) SearchMvtRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Search a vector tile. *

* Search a vector tile for geospatial values. Before using this API, you should * be familiar with the Mapbox vector tile specification. The API returns * results as a binary mapbox vector tile. *

* Internally, Elasticsearch translates a vector tile search API request into a * search containing: *

    *
  • A geo_bounding_box query on the <field>. * The query uses the <zoom>/<x>/<y> tile as a * bounding box.
  • *
  • A geotile_grid or geohex_grid aggregation on * the <field>. The grid_agg parameter * determines the aggregation type. The aggregation uses the * <zoom>/<x>/<y> tile as a bounding box.
  • *
  • Optionally, a geo_bounds aggregation on the * <field>. The search only includes this aggregation if the * exact_bounds parameter is true.
  • *
  • If the optional parameter with_labels is true, * the internal search will include a dynamic runtime field that calls the * getLabelPosition function of the geometry doc value. This * enables the generation of new point features containing suggested geometry * labels, so that, for example, multi-polygons will have only one label.
  • *
*

* For example, Elasticsearch may translate a vector tile search API request * with a grid_agg argument of geotile and an * exact_bounds argument of true into the following * search * *

	 * GET my-index/_search
	 * {
	 *   "size": 10000,
	 *   "query": {
	 *     "geo_bounding_box": {
	 *       "my-geo-field": {
	 *         "top_left": {
	 *           "lat": -40.979898069620134,
	 *           "lon": -45
	 *         },
	 *         "bottom_right": {
	 *           "lat": -66.51326044311186,
	 *           "lon": 0
	 *         }
	 *       }
	 *     }
	 *   },
	 *   "aggregations": {
	 *     "grid": {
	 *       "geotile_grid": {
	 *         "field": "my-geo-field",
	 *         "precision": 11,
	 *         "size": 65536,
	 *         "bounds": {
	 *           "top_left": {
	 *             "lat": -40.979898069620134,
	 *             "lon": -45
	 *           },
	 *           "bottom_right": {
	 *             "lat": -66.51326044311186,
	 *             "lon": 0
	 *           }
	 *         }
	 *       }
	 *     },
	 *     "bounds": {
	 *       "geo_bounds": {
	 *         "field": "my-geo-field",
	 *         "wrap_longitude": false
	 *       }
	 *     }
	 *   }
	 * }
	 * 
	 * 
*

* The API returns results as a binary Mapbox vector tile. Mapbox vector tiles * are encoded as Google Protobufs (PBF). By default, the tile contains three * layers: *

    *
  • A hits layer containing a feature for each * <field> value matching the geo_bounding_box * query.
  • *
  • An aggs layer containing a feature for each cell of the * geotile_grid or geohex_grid. The layer only * contains features for cells with matching data.
  • *
  • A meta layer containing: *
      *
    • A feature containing a bounding box. By default, this is the bounding box * of the tile.
    • *
    • Value ranges for any sub-aggregations on the geotile_grid or * geohex_grid.
    • *
    • Metadata for the search.
    • *
    *
  • *
*

* The API only returns features that can display at its zoom level. For * example, if a polygon feature has no area at its zoom level, the API omits * it. The API returns errors as UTF-8 encoded JSON. *

* IMPORTANT: You can specify several options for this API as either a query * parameter or request body parameter. If you specify both parameters, the * query parameter takes precedence. *

* Grid precision for geotile *

* For a grid_agg of geotile, you can use cells in the * aggs layer as tiles for lower zoom levels. * grid_precision represents the additional zoom levels available * through these cells. The final precision is computed as follows: * <zoom> + grid_precision. For example, if * <zoom> is 7 and grid_precision is 8, then the * geotile_grid aggregation will use a precision of 15. The maximum * final precision is 29. The grid_precision also determines the * number of cells for the grid as follows: * (2^grid_precision) x (2^grid_precision). For example, a value of * 8 divides the tile into a grid of 256 x 256 cells. The aggs * layer only contains features for cells with matching data. *

* Grid precision for geohex *

* For a grid_agg of geohex, Elasticsearch uses * <zoom> and grid_precision to calculate a * final precision as follows: <zoom> + grid_precision. *

* This precision determines the H3 resolution of the hexagonal cells produced * by the geohex aggregation. The following table maps the H3 * resolution for each precision. For example, if <zoom> is 3 * and grid_precision is 3, the precision is 6. At a precision of * 6, hexagonal cells have an H3 resolution of 2. If <zoom> * is 3 and grid_precision is 4, the precision is 7. At a precision * of 7, hexagonal cells have an H3 resolution of 3. *

* Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio
* 1 | 4 | 0 | 122 | 30.5
* 2 | 16 | 0 | 122 | 7.625
* 3 | 64 | 1 | 842 | 13.15625
* 4 | 256 | 1 | 842 | 3.2890625
* 5 | 1024 | 2 | 5882 | 5.744140625
* 6 | 4096 | 2 | 5882 | 1.436035156
* 7 | 16384 | 3 | 41162 | 2.512329102
* 8 | 65536 | 3 | 41162 | 0.6280822754
* 9 | 262144 | 4 | 288122 | 1.099098206
* 10 | 1048576 | 4 | 288122 | 0.2747745514
* 11 | 4194304 | 5 | 2016842 | 0.4808526039
* 12 | 16777216 | 6 | 14117882 | 0.8414913416
* 13 | 67108864 | 6 | 14117882 | 0.2103728354
* 14 | 268435456 | 7 | 98825162 | 0.3681524172
* 15 | 1073741824 | 8 | 691776122 | 0.644266719
* 16 | 4294967296 | 8 | 691776122 | 0.1610666797
* 17 | 17179869184 | 9 | 4842432842 | 0.2818666889
* 18 | 68719476736 | 10 | 33897029882 | 0.4932667053
* 19 | 274877906944 | 11 | 237279209162 | 0.8632167343
* 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836
* 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213
* 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122
* 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078
* 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866
* 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015
* 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504
* 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259
* 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149
* 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537
*

* Hexagonal cells don't align perfectly on a vector tile. Some cells may * intersect more than one vector tile. To compute the H3 resolution for each * precision, Elasticsearch compares the average density of hexagonal bins at * each resolution with the average density of tile bins at each zoom level. * Elasticsearch uses the H3 resolution that is closest to the corresponding * geotile density. * * @param fn * a function that initializes a builder to create the * {@link SearchMvtRequest} * @see Documentation * on elastic.co */ public final CompletableFuture searchMvt( Function> fn) { return searchMvt(fn.apply(new SearchMvtRequest.Builder()).build()); } // ----- Endpoint: search_shards /** * Get the search shards. *

* Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the * filter is returned as part of the indices section. *
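* A minimal asynchronous usage sketch (not part of the original Javadoc); the index and routing value are assumptions. *

	 * client.searchShards(s -> s
	 *         .index("my-index-000001")
	 *         .routing("user1"))
	 *     .thenAccept(resp -> resp.shards()
	 *         .forEach(shardGroup -> System.out.println(shardGroup)));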

* If the Elasticsearch security features are enabled, you must have the * view_index_metadata or manage index privilege for * the target data stream, index, or alias. * * @see Documentation * on elastic.co */ public CompletableFuture searchShards(SearchShardsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) SearchShardsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get the search shards. *

* Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the * filter is returned as part of the indices section. *

* If the Elasticsearch security features are enabled, you must have the * view_index_metadata or manage index privilege for * the target data stream, index, or alias. * * @param fn * a function that initializes a builder to create the * {@link SearchShardsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture searchShards( Function> fn) { return searchShards(fn.apply(new SearchShardsRequest.Builder()).build()); } /** * Get the search shards. *

* Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the * filter is returned as part of the indices section. *

* If the Elasticsearch security features are enabled, you must have the * view_index_metadata or manage index privilege for * the target data stream, index, or alias. * * @see Documentation * on elastic.co */ public CompletableFuture searchShards() { return this.transport.performRequestAsync(new SearchShardsRequest.Builder().build(), SearchShardsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: search_template /** * Run a search with a search template. * * @see Documentation * on elastic.co */ public CompletableFuture> searchTemplate( SearchTemplateRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) SearchTemplateRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.search_template.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a search with a search template. * * @param fn * a function that initializes a builder to create the * {@link SearchTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> searchTemplate( Function> fn, Class tDocumentClass) { return searchTemplate(fn.apply(new SearchTemplateRequest.Builder()).build(), tDocumentClass); } /** * Overload of {@link #searchTemplate(SearchTemplateRequest, Class)}, where * Class is defined as Void, meaning the documents will not be deserialized. */ public CompletableFuture> searchTemplate(SearchTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) SearchTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Overload of {@link #searchTemplate(Function, Class)}, where Class is defined * as Void, meaning the documents will not be deserialized. */ public final CompletableFuture> searchTemplate( Function> fn) { return searchTemplate(fn.apply(new SearchTemplateRequest.Builder()).build(), Void.class); } /** * Run a search with a search template. * * @see Documentation * on elastic.co */ public CompletableFuture> searchTemplate( SearchTemplateRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) SearchTemplateRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.search_template.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Run a search with a search template. * * @param fn * a function that initializes a builder to create the * {@link SearchTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> searchTemplate( Function> fn, Type tDocumentType) { return searchTemplate(fn.apply(new SearchTemplateRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: terms_enum /** * Get terms in an index. *

* Discover terms that match a partial string in an index. This API is designed * for low-latency look-ups used in auto-complete scenarios.
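* A minimal asynchronous usage sketch (not part of the original Javadoc); the index and field names are assumptions. *

	 * // Suggest up to ten indexed tag values that start with "kiba".
	 * client.termsEnum(t -> t
	 *         .index("stackoverflow")
	 *         .field("tags")
	 *         .string("kiba")
	 *         .size(10))
	 *     .thenAccept(resp -> resp.terms().forEach(System.out::println));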

*

* info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. *

*
* * @see Documentation * on elastic.co */ public CompletableFuture termsEnum(TermsEnumRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) TermsEnumRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get terms in an index. *

* Discover terms that match a partial string in an index. This API is designed * for low-latency look-ups used in auto-complete scenarios.

*

* info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. *

*
* * @param fn * a function that initializes a builder to create the * {@link TermsEnumRequest} * @see Documentation * on elastic.co */ public final CompletableFuture termsEnum( Function> fn) { return termsEnum(fn.apply(new TermsEnumRequest.Builder()).build()); } // ----- Endpoint: termvectors /** * Get term vector information. *

* Get information and statistics about terms in the fields of a particular * document. *

* You can retrieve term vectors for documents stored in the index or for * artificial documents passed in the body of the request. You can specify the * fields you are interested in through the fields parameter or by * adding the fields to the request body. For example: * *

	 * GET /my-index-000001/_termvectors/1?fields=message
	 * 
	 * 
*

* Fields can be specified using wildcards, similar to the multi match query. *

* Term vectors are real-time by default, not near real-time. This can be * changed by setting realtime parameter to false. *
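* A minimal asynchronous usage sketch (not part of the original Javadoc); the index, document ID, and field name are assumptions. *

	 * client.termvectors(tv -> tv
	 *         .index("my-index-000001")
	 *         .id("1")
	 *         .fields("message")
	 *         .termStatistics(true))
	 *     .thenAccept(resp -> System.out.println(resp.termVectors().keySet()));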

* You can request three types of values: term information, term * statistics, and field statistics. By default, all term * information and field statistics are returned for all fields but term * statistics are excluded. *

* Term information *

    *
  • term frequency in the field (always returned)
  • *
  • term positions (positions: true)
  • *
  • start and end offsets (offsets: true)
  • *
  • term payloads (payloads: true), as base64 encoded bytes
  • *
*

* If the requested information wasn't stored in the index, it will be computed * on the fly if possible. Additionally, term vectors could be computed for * documents not even existing in the index, but instead provided by the user. *

*

* warn Start and end offsets assume UTF-16 encoding is being used. If you want * to use these offsets in order to get the original text that produced this * token, you should make sure that the string you are taking a sub-string of is * also encoded using UTF-16. *

*
*

* Behaviour *

* The term and field statistics are not accurate. Deleted documents are not * taken into account. The information is only retrieved for the shard the * requested document resides in. The term and field statistics are therefore * only useful as relative measures whereas the absolute numbers have no meaning * in this context. By default, when requesting term vectors of artificial * documents, a shard to get the statistics from is randomly selected. Use * routing only to hit a particular shard. Refer to the linked * documentation for detailed examples of how to use this API. * * @see Documentation * on elastic.co */ public CompletableFuture termvectors(TermvectorsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint, TermvectorsResponse, ErrorResponse> endpoint = (JsonEndpoint, TermvectorsResponse, ErrorResponse>) TermvectorsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get term vector information. *

* Get information and statistics about terms in the fields of a particular * document. *

* You can retrieve term vectors for documents stored in the index or for * artificial documents passed in the body of the request. You can specify the * fields you are interested in through the fields parameter or by * adding the fields to the request body. For example: * *

	 * GET /my-index-000001/_termvectors/1?fields=message
	 * 
	 * 
*

* Fields can be specified using wildcards, similar to the multi match query. *

* Term vectors are real-time by default, not near real-time. This can be * changed by setting realtime parameter to false. *

* You can request three types of values: term information, term * statistics, and field statistics. By default, all term * information and field statistics are returned for all fields but term * statistics are excluded. *

* Term information *

    *
  • term frequency in the field (always returned)
  • *
  • term positions (positions: true)
  • *
  • start and end offsets (offsets: true)
  • *
  • term payloads (payloads: true), as base64 encoded bytes
  • *
*

* If the requested information wasn't stored in the index, it will be computed * on the fly if possible. Additionally, term vectors could be computed for * documents not even existing in the index, but instead provided by the user. *

*

* warn Start and end offsets assume UTF-16 encoding is being used. If you want * to use these offsets in order to get the original text that produced this * token, you should make sure that the string you are taking a sub-string of is * also encoded using UTF-16. *

*
*

* Behaviour *

* The term and field statistics are not accurate. Deleted documents are not * taken into account. The information is only retrieved for the shard the * requested document resides in. The term and field statistics are therefore * only useful as relative measures whereas the absolute numbers have no meaning * in this context. By default, when requesting term vectors of artificial * documents, a shard to get the statistics from is randomly selected. Use * routing only to hit a particular shard. Refer to the linked * documentation for detailed examples of how to use this API. * * @param fn * a function that initializes a builder to create the * {@link TermvectorsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture termvectors( Function, ObjectBuilder>> fn) { return termvectors(fn.apply(new TermvectorsRequest.Builder()).build()); } // ----- Endpoint: update /** * Update a document. *

* Update a document by running a script or passing a partial document. *

* If the Elasticsearch security features are enabled, you must have the * index or write index privilege for the target index * or index alias. *

* The script can update, delete, or skip modifying the document. The API also * supports passing a partial document, which is merged into the existing * document. To fully replace an existing document, use the index API. This * operation: *

    *
  • Gets the document (collocated with the shard) from the index.
  • *
  • Runs the specified script.
  • *
  • Indexes the result.
  • *
*

* The document must still be reindexed, but using this API removes some network * roundtrips and reduces chances of version conflicts between the GET and the * index operation. *
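* A minimal asynchronous usage sketch (not part of the original Javadoc); the client variable, index name, and the Product class with its setter are assumptions. *

	 * Product changes = new Product();
	 * changes.setPrice(9.99);                        // only the fields present in the partial document are merged
	 * client.update(u -> u
	 *         .index("products")
	 *         .id("1")
	 *         .doc(changes)
	 *         .docAsUpsert(true),
	 *         Product.class)
	 *     .thenAccept(resp -> System.out.println(resp.result()));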

* The _source field must be enabled to use this API. In addition * to _source, you can access the following variables through the * ctx map: _index, _type, * _id, _version, _routing, and * _now (the current timestamp). * * @see Documentation * on elastic.co */ public CompletableFuture> update( UpdateRequest request, Class tDocumentClass) { @SuppressWarnings("unchecked") JsonEndpoint, UpdateResponse, ErrorResponse> endpoint = (JsonEndpoint, UpdateResponse, ErrorResponse>) UpdateRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.update.Response.TDocument", getDeserializer(tDocumentClass)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update a document. *

* Update a document by running a script or passing a partial document. *

* If the Elasticsearch security features are enabled, you must have the * index or write index privilege for the target index * or index alias. *

* The script can update, delete, or skip modifying the document. The API also * supports passing a partial document, which is merged into the existing * document. To fully replace an existing document, use the index API. This * operation: *

    *
  • Gets the document (collocated with the shard) from the index.
  • *
  • Runs the specified script.
  • *
  • Indexes the result.
  • *
*

* The document must still be reindexed, but using this API removes some network * roundtrips and reduces chances of version conflicts between the GET and the * index operation. *

* The _source field must be enabled to use this API. In addition * to _source, you can access the following variables through the * ctx map: _index, _type, * _id, _version, _routing, and * _now (the current timestamp). * * @param fn * a function that initializes a builder to create the * {@link UpdateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> update( Function, ObjectBuilder>> fn, Class tDocumentClass) { return update(fn.apply(new UpdateRequest.Builder()).build(), tDocumentClass); } /** * Update a document. *

* Update a document by running a script or passing a partial document. *

* If the Elasticsearch security features are enabled, you must have the * index or write index privilege for the target index * or index alias. *

* The script can update, delete, or skip modifying the document. The API also * supports passing a partial document, which is merged into the existing * document. To fully replace an existing document, use the index API. This * operation: *

    *
  • Gets the document (collocated with the shard) from the index.
  • *
  • Runs the specified script.
  • *
  • Indexes the result.
  • *
*

* The document must still be reindexed, but using this API removes some network * roundtrips and reduces chances of version conflicts between the GET and the * index operation. *

* The _source field must be enabled to use this API. In addition * to _source, you can access the following variables through the * ctx map: _index, _type, * _id, _version, _routing, and * _now (the current timestamp). * * @see Documentation * on elastic.co */ public CompletableFuture> update( UpdateRequest request, Type tDocumentType) { @SuppressWarnings("unchecked") JsonEndpoint, UpdateResponse, ErrorResponse> endpoint = (JsonEndpoint, UpdateResponse, ErrorResponse>) UpdateRequest._ENDPOINT; endpoint = new EndpointWithResponseMapperAttr<>(endpoint, "co.elastic.clients:Deserializer:_global.update.Response.TDocument", getDeserializer(tDocumentType)); return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update a document. *

* Update a document by running a script or passing a partial document. *

* If the Elasticsearch security features are enabled, you must have the * index or write index privilege for the target index * or index alias. *

* The script can update, delete, or skip modifying the document. The API also * supports passing a partial document, which is merged into the existing * document. To fully replace an existing document, use the index API. This * operation: *

    *
  • Gets the document (collocated with the shard) from the index.
  • *
  • Runs the specified script.
  • *
  • Indexes the result.
  • *
*

* The document must still be reindexed, but using this API removes some network * roundtrips and reduces chances of version conflicts between the GET and the * index operation. *

* The _source field must be enabled to use this API. In addition * to _source, you can access the following variables through the * ctx map: _index, _type, * _id, _version, _routing, and * _now (the current timestamp). * * @param fn * a function that initializes a builder to create the * {@link UpdateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture> update( Function, ObjectBuilder>> fn, Type tDocumentType) { return update(fn.apply(new UpdateRequest.Builder()).build(), tDocumentType); } // ----- Endpoint: update_by_query /** * Update documents. Updates documents that match the specified query. If no * query is specified, performs an update on every document in the data stream * or index without modifying the source, which is useful for picking up mapping * changes. *

* If the Elasticsearch security features are enabled, you must have the * following index privileges for the target data stream, index, or alias: *

    *
  • read
  • *
  • index or write
  • *
*

* You can specify the query criteria in the request URI or the request body * using the same syntax as the search API. *
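* A minimal asynchronous usage sketch (not part of the original Javadoc); the index, query field, and value are assumptions, and Conflicts is co.elastic.clients.elasticsearch._types.Conflicts. *

	 * client.updateByQuery(u -> u
	 *         .index("my-index-000001")
	 *         .query(q -> q.term(t -> t.field("user.id").value("kimchy")))
	 *         .conflicts(Conflicts.Proceed))         // count version conflicts instead of aborting
	 *     .thenAccept(resp -> System.out.println("updated: " + resp.updated()));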

* When you submit an update by query request, Elasticsearch gets a snapshot of * the data stream or index when it begins processing the request and updates * matching documents using internal versioning. When the versions match, the * document is updated and the version number is incremented. If a document * changes between the time that the snapshot is taken and the update operation * is processed, it results in a version conflict and the operation fails. You * can opt to count version conflicts instead of halting and returning by * setting conflicts to proceed. Note that if you opt * to count version conflicts, the operation could attempt to update more * documents from the source than max_docs until it has * successfully updated max_docs documents or it has gone through * every document in the source query. *

* NOTE: Documents with a version equal to 0 cannot be updated using update by * query because internal versioning does not support 0 as a valid version * number. *

* While processing an update by query request, Elasticsearch performs multiple * search requests sequentially to find all of the matching documents. A bulk * update request is performed for each batch of matching documents. Any query * or update failures cause the update by query request to fail and the failures * are shown in the response. Any update requests that completed successfully * still stick, they are not rolled back. *

* Throttling update requests *

* To control the rate at which update by query issues batches of update * operations, you can set requests_per_second to any positive * decimal number. This pads each batch with a wait time to throttle the rate. * Set requests_per_second to -1 to turn off * throttling. *

* Throttling uses a wait time between batches so that the internal scroll * requests can be given a timeout that takes the request padding into account. * The padding time is the difference between the batch size divided by the * requests_per_second and the time spent writing. By default the * batch size is 1000, so if requests_per_second is set to * 500: * *

	 * target_time = 1000 / 500 per second = 2 seconds
	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
	 * 
	 * 
*

* Since the batch is issued as a single _bulk request, large batch sizes cause * Elasticsearch to create many requests and wait before starting the next set. * This is "bursty" instead of "smooth". *

* Slicing *

* Update by query supports sliced scroll to parallelize the update process. * This can improve efficiency and provide a convenient way to break the request * down into smaller parts. *

* Setting slices to auto chooses a reasonable number * for most data streams and indices. This setting will use one slice per shard, * up to a certain limit. If there are multiple source data streams or indices, * it will choose the number of slices based on the index or backing index with * the smallest number of shards. *

* Adding slices to _update_by_query just automates * the manual process of creating sub-requests, which means it has some quirks: *

    *
  • You can see these requests in the tasks APIs. These sub-requests are * "child" tasks of the task for the request with slices.
  • *
  • Fetching the status of the task for the request with slices * only contains the status of completed slices.
  • *
  • These sub-requests are individually addressable for things like * cancellation and rethrottling.
  • *
  • Rethrottling the request with slices will rethrottle the * unfinished sub-request proportionally.
  • *
  • Canceling the request with slices will cancel each sub-request.
  • *
  • Due to the nature of slices each sub-request won't get a perfectly even * portion of the documents. All documents will be addressed, but some slices * may be larger than others. Expect larger slices to have a more even * distribution.
  • *
  • Parameters like requests_per_second and * max_docs on a request with slices are distributed proportionally * to each sub-request. Combine that with the point above about distribution * being uneven and you should conclude that using max_docs with * slices might not result in exactly max_docs * documents being updated.
  • *
  • Each sub-request gets a slightly different snapshot of the source data * stream or index though these are all taken at approximately the same * time.
  • *
*

* If you're slicing manually or otherwise tuning automatic slicing, keep in * mind that: *

    *
  • Query performance is most efficient when the number of slices is equal to * the number of shards in the index or backing index. If that number is large * (for example, 500), choose a lower number as too many slices hurts * performance. Setting slices higher than the number of shards generally does * not improve efficiency and adds overhead.
  • *
  • Update performance scales linearly across available resources with the * number of slices.
  • *
*

* Whether query or update performance dominates the runtime depends on the * documents being reindexed and cluster resources. *

* Update the document source *

* Update by query supports scripts to update the document source. As with the * update API, you can set ctx.op to change the operation that is * performed. *

* Set ctx.op = "noop" if your script decides that it * doesn't have to make any changes. The update by query operation skips * updating the document and increments the noop counter. *

* Set ctx.op = "delete" if your script decides that the * document should be deleted. The update by query operation deletes the * document and increments the deleted counter. *

* Update by query supports only index, noop, and * delete. Setting ctx.op to anything else is an * error. Setting any other field in ctx is an error. This API * enables you to only modify the source of matching documents; you cannot move * them. * * @see Documentation * on elastic.co */ public CompletableFuture updateByQuery(UpdateByQueryRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) UpdateByQueryRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update documents. Updates documents that match the specified query. If no * query is specified, performs an update on every document in the data stream * or index without modifying the source, which is useful for picking up mapping * changes. *

* If the Elasticsearch security features are enabled, you must have the * following index privileges for the target data stream, index, or alias: *

    *
  • read
  • *
  • index or write
  • *
*

* You can specify the query criteria in the request URI or the request body * using the same syntax as the search API. *

* When you submit an update by query request, Elasticsearch gets a snapshot of * the data stream or index when it begins processing the request and updates * matching documents using internal versioning. When the versions match, the * document is updated and the version number is incremented. If a document * changes between the time that the snapshot is taken and the update operation * is processed, it results in a version conflict and the operation fails. You * can opt to count version conflicts instead of halting and returning by * setting conflicts to proceed. Note that if you opt * to count version conflicts, the operation could attempt to update more * documents from the source than max_docs until it has * successfully updated max_docs documents or it has gone through * every document in the source query. *

* NOTE: Documents with a version equal to 0 cannot be updated using update by * query because internal versioning does not support 0 as a valid version * number. *

* While processing an update by query request, Elasticsearch performs multiple * search requests sequentially to find all of the matching documents. A bulk * update request is performed for each batch of matching documents. Any query * or update failures cause the update by query request to fail and the failures * are shown in the response. Any update requests that completed successfully * still stick, they are not rolled back. *

* Throttling update requests *

* To control the rate at which update by query issues batches of update * operations, you can set requests_per_second to any positive * decimal number. This pads each batch with a wait time to throttle the rate. * Set requests_per_second to -1 to turn off * throttling. *

* Throttling uses a wait time between batches so that the internal scroll * requests can be given a timeout that takes the request padding into account. * The padding time is the difference between the batch size divided by the * requests_per_second and the time spent writing. By default the * batch size is 1000, so if requests_per_second is set to * 500: * *

	 * target_time = 1000 / 500 per second = 2 seconds
	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
	 * 
	 * 
*

* Since the batch is issued as a single _bulk request, large batch sizes cause * Elasticsearch to create many requests and wait before starting the next set. * This is "bursty" instead of "smooth". *

* Slicing *

* Update by query supports sliced scroll to parallelize the update process. * This can improve efficiency and provide a convenient way to break the request * down into smaller parts. *

* Setting slices to auto chooses a reasonable number * for most data streams and indices. This setting will use one slice per shard, * up to a certain limit. If there are multiple source data streams or indices, * it will choose the number of slices based on the index or backing index with * the smallest number of shards. *

* Adding slices to _update_by_query just automates * the manual process of creating sub-requests, which means it has some quirks: *

    *
  • You can see these requests in the tasks APIs. These sub-requests are * "child" tasks of the task for the request with slices.
  • *
  • Fetching the status of the task for the request with slices * only contains the status of completed slices.
  • *
  • These sub-requests are individually addressable for things like * cancellation and rethrottling.
  • *
  • Rethrottling the request with slices will rethrottle the * unfinished sub-request proportionally.
  • *
  • Canceling the request with slices will cancel each sub-request.
  • *
  • Due to the nature of slices each sub-request won't get a perfectly even * portion of the documents. All documents will be addressed, but some slices * may be larger than others. Expect larger slices to have a more even * distribution.
  • *
  • Parameters like requests_per_second and * max_docs on a request with slices are distributed proportionally * to each sub-request. Combine that with the point above about distribution * being uneven and you should conclude that using max_docs with * slices might not result in exactly max_docs * documents being updated.
  • *
  • Each sub-request gets a slightly different snapshot of the source data * stream or index though these are all taken at approximately the same * time.
  • *
*

* If you're slicing manually or otherwise tuning automatic slicing, keep in * mind that: *

    *
  • Query performance is most efficient when the number of slices is equal to * the number of shards in the index or backing index. If that number is large * (for example, 500), choose a lower number as too many slices hurts * performance. Setting slices higher than the number of shards generally does * not improve efficiency and adds overhead.
  • *
  • Update performance scales linearly across available resources with the * number of slices.
  • *
*

* Whether query or update performance dominates the runtime depends on the * documents being reindexed and cluster resources. *

* Update the document source *

* Update by query supports scripts to update the document source. As with the * update API, you can set ctx.op to change the operation that is * performed. *

* Set ctx.op = "noop" if your script decides that it * doesn't have to make any changes. The update by query operation skips * updating the document and increments the noop counter. *

* Set ctx.op = "delete" if your script decides that the * document should be deleted. The update by query operation deletes the * document and increments the deleted counter. *

* Update by query supports only index, noop, and * delete. Setting ctx.op to anything else is an * error. Setting any other field in ctx is an error. This API * enables you to only modify the source of matching documents; you cannot move * them. * * @param fn * a function that initializes a builder to create the * {@link UpdateByQueryRequest} * @see Documentation * on elastic.co */ public final CompletableFuture updateByQuery( Function> fn) { return updateByQuery(fn.apply(new UpdateByQueryRequest.Builder()).build()); } // ----- Endpoint: update_by_query_rethrottle /** * Throttle an update by query operation. *

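* For illustration (not part of the original Javadoc), a minimal sketch of the rethrottle call documented here; the task ID shown is illustrative only. *

	 * // Remove throttling from a running update-by-query task.
	 * client.updateByQueryRethrottle(r -> r
	 *         .taskId("r1A2WoRbTwKZ516z6NEs5A:36619")
	 *         .requestsPerSecond(-1F));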
* Change the number of requests per second for a particular update by query * operation. Rethrottling that speeds up the query takes effect immediately but * rethrottling that slows down the query takes effect after completing the * current batch to prevent scroll timeouts. * * @see Documentation * on elastic.co */ public CompletableFuture<UpdateByQueryRethrottleResponse> updateByQueryRethrottle( UpdateByQueryRethrottleRequest request) { @SuppressWarnings("unchecked") JsonEndpoint<UpdateByQueryRethrottleRequest, UpdateByQueryRethrottleResponse, ErrorResponse> endpoint = (JsonEndpoint<UpdateByQueryRethrottleRequest, UpdateByQueryRethrottleResponse, ErrorResponse>) UpdateByQueryRethrottleRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Throttle an update by query operation. *

* Change the number of requests per second for a particular update by query * operation. Rethrottling that speeds up the query takes effect immediately but * rethrottling that slows down the query takes effect after completing the * current batch to prevent scroll timeouts. * * @param fn * a function that initializes a builder to create the * {@link UpdateByQueryRethrottleRequest} * @see Documentation * on elastic.co */ public final CompletableFuture<UpdateByQueryRethrottleResponse> updateByQueryRethrottle( Function<UpdateByQueryRethrottleRequest.Builder, ObjectBuilder<UpdateByQueryRethrottleRequest>> fn) { return updateByQueryRethrottle(fn.apply(new UpdateByQueryRethrottleRequest.Builder()).build()); } }