
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package co.elastic.clients.elasticsearch;
import co.elastic.clients.ApiClient;
import co.elastic.clients.elasticsearch._types.ErrorResponse;
import co.elastic.clients.elasticsearch.async_search.ElasticsearchAsyncSearchAsyncClient;
import co.elastic.clients.elasticsearch.autoscaling.ElasticsearchAutoscalingAsyncClient;
import co.elastic.clients.elasticsearch.cat.ElasticsearchCatAsyncClient;
import co.elastic.clients.elasticsearch.ccr.ElasticsearchCcrAsyncClient;
import co.elastic.clients.elasticsearch.cluster.ElasticsearchClusterAsyncClient;
import co.elastic.clients.elasticsearch.connector.ElasticsearchConnectorAsyncClient;
import co.elastic.clients.elasticsearch.core.BulkRequest;
import co.elastic.clients.elasticsearch.core.BulkResponse;
import co.elastic.clients.elasticsearch.core.ClearScrollRequest;
import co.elastic.clients.elasticsearch.core.ClearScrollResponse;
import co.elastic.clients.elasticsearch.core.ClosePointInTimeRequest;
import co.elastic.clients.elasticsearch.core.ClosePointInTimeResponse;
import co.elastic.clients.elasticsearch.core.CountRequest;
import co.elastic.clients.elasticsearch.core.CountResponse;
import co.elastic.clients.elasticsearch.core.CreateRequest;
import co.elastic.clients.elasticsearch.core.CreateResponse;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRequest;
import co.elastic.clients.elasticsearch.core.DeleteByQueryResponse;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRethrottleRequest;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRethrottleResponse;
import co.elastic.clients.elasticsearch.core.DeleteRequest;
import co.elastic.clients.elasticsearch.core.DeleteResponse;
import co.elastic.clients.elasticsearch.core.DeleteScriptRequest;
import co.elastic.clients.elasticsearch.core.DeleteScriptResponse;
import co.elastic.clients.elasticsearch.core.ExistsRequest;
import co.elastic.clients.elasticsearch.core.ExistsSourceRequest;
import co.elastic.clients.elasticsearch.core.ExplainRequest;
import co.elastic.clients.elasticsearch.core.ExplainResponse;
import co.elastic.clients.elasticsearch.core.FieldCapsRequest;
import co.elastic.clients.elasticsearch.core.FieldCapsResponse;
import co.elastic.clients.elasticsearch.core.GetRequest;
import co.elastic.clients.elasticsearch.core.GetResponse;
import co.elastic.clients.elasticsearch.core.GetScriptContextRequest;
import co.elastic.clients.elasticsearch.core.GetScriptContextResponse;
import co.elastic.clients.elasticsearch.core.GetScriptLanguagesRequest;
import co.elastic.clients.elasticsearch.core.GetScriptLanguagesResponse;
import co.elastic.clients.elasticsearch.core.GetScriptRequest;
import co.elastic.clients.elasticsearch.core.GetScriptResponse;
import co.elastic.clients.elasticsearch.core.GetSourceRequest;
import co.elastic.clients.elasticsearch.core.GetSourceResponse;
import co.elastic.clients.elasticsearch.core.HealthReportRequest;
import co.elastic.clients.elasticsearch.core.HealthReportResponse;
import co.elastic.clients.elasticsearch.core.IndexRequest;
import co.elastic.clients.elasticsearch.core.IndexResponse;
import co.elastic.clients.elasticsearch.core.InfoRequest;
import co.elastic.clients.elasticsearch.core.InfoResponse;
import co.elastic.clients.elasticsearch.core.MgetRequest;
import co.elastic.clients.elasticsearch.core.MgetResponse;
import co.elastic.clients.elasticsearch.core.MsearchRequest;
import co.elastic.clients.elasticsearch.core.MsearchResponse;
import co.elastic.clients.elasticsearch.core.MsearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.MsearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.MtermvectorsRequest;
import co.elastic.clients.elasticsearch.core.MtermvectorsResponse;
import co.elastic.clients.elasticsearch.core.OpenPointInTimeRequest;
import co.elastic.clients.elasticsearch.core.OpenPointInTimeResponse;
import co.elastic.clients.elasticsearch.core.PingRequest;
import co.elastic.clients.elasticsearch.core.PutScriptRequest;
import co.elastic.clients.elasticsearch.core.PutScriptResponse;
import co.elastic.clients.elasticsearch.core.RankEvalRequest;
import co.elastic.clients.elasticsearch.core.RankEvalResponse;
import co.elastic.clients.elasticsearch.core.ReindexRequest;
import co.elastic.clients.elasticsearch.core.ReindexResponse;
import co.elastic.clients.elasticsearch.core.ReindexRethrottleRequest;
import co.elastic.clients.elasticsearch.core.ReindexRethrottleResponse;
import co.elastic.clients.elasticsearch.core.RenderSearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.RenderSearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.ScriptsPainlessExecuteRequest;
import co.elastic.clients.elasticsearch.core.ScriptsPainlessExecuteResponse;
import co.elastic.clients.elasticsearch.core.ScrollRequest;
import co.elastic.clients.elasticsearch.core.ScrollResponse;
import co.elastic.clients.elasticsearch.core.SearchMvtRequest;
import co.elastic.clients.elasticsearch.core.SearchRequest;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import co.elastic.clients.elasticsearch.core.SearchShardsRequest;
import co.elastic.clients.elasticsearch.core.SearchShardsResponse;
import co.elastic.clients.elasticsearch.core.SearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.SearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.TermsEnumRequest;
import co.elastic.clients.elasticsearch.core.TermsEnumResponse;
import co.elastic.clients.elasticsearch.core.TermvectorsRequest;
import co.elastic.clients.elasticsearch.core.TermvectorsResponse;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRequest;
import co.elastic.clients.elasticsearch.core.UpdateByQueryResponse;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRethrottleRequest;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRethrottleResponse;
import co.elastic.clients.elasticsearch.core.UpdateRequest;
import co.elastic.clients.elasticsearch.core.UpdateResponse;
import co.elastic.clients.elasticsearch.dangling_indices.ElasticsearchDanglingIndicesAsyncClient;
import co.elastic.clients.elasticsearch.enrich.ElasticsearchEnrichAsyncClient;
import co.elastic.clients.elasticsearch.eql.ElasticsearchEqlAsyncClient;
import co.elastic.clients.elasticsearch.esql.ElasticsearchEsqlAsyncClient;
import co.elastic.clients.elasticsearch.features.ElasticsearchFeaturesAsyncClient;
import co.elastic.clients.elasticsearch.fleet.ElasticsearchFleetAsyncClient;
import co.elastic.clients.elasticsearch.graph.ElasticsearchGraphAsyncClient;
import co.elastic.clients.elasticsearch.ilm.ElasticsearchIlmAsyncClient;
import co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesAsyncClient;
import co.elastic.clients.elasticsearch.inference.ElasticsearchInferenceAsyncClient;
import co.elastic.clients.elasticsearch.ingest.ElasticsearchIngestAsyncClient;
import co.elastic.clients.elasticsearch.license.ElasticsearchLicenseAsyncClient;
import co.elastic.clients.elasticsearch.logstash.ElasticsearchLogstashAsyncClient;
import co.elastic.clients.elasticsearch.migration.ElasticsearchMigrationAsyncClient;
import co.elastic.clients.elasticsearch.ml.ElasticsearchMlAsyncClient;
import co.elastic.clients.elasticsearch.monitoring.ElasticsearchMonitoringAsyncClient;
import co.elastic.clients.elasticsearch.nodes.ElasticsearchNodesAsyncClient;
import co.elastic.clients.elasticsearch.query_rules.ElasticsearchQueryRulesAsyncClient;
import co.elastic.clients.elasticsearch.rollup.ElasticsearchRollupAsyncClient;
import co.elastic.clients.elasticsearch.search_application.ElasticsearchSearchApplicationAsyncClient;
import co.elastic.clients.elasticsearch.searchable_snapshots.ElasticsearchSearchableSnapshotsAsyncClient;
import co.elastic.clients.elasticsearch.security.ElasticsearchSecurityAsyncClient;
import co.elastic.clients.elasticsearch.shutdown.ElasticsearchShutdownAsyncClient;
import co.elastic.clients.elasticsearch.simulate.ElasticsearchSimulateAsyncClient;
import co.elastic.clients.elasticsearch.slm.ElasticsearchSlmAsyncClient;
import co.elastic.clients.elasticsearch.snapshot.ElasticsearchSnapshotAsyncClient;
import co.elastic.clients.elasticsearch.sql.ElasticsearchSqlAsyncClient;
import co.elastic.clients.elasticsearch.ssl.ElasticsearchSslAsyncClient;
import co.elastic.clients.elasticsearch.synonyms.ElasticsearchSynonymsAsyncClient;
import co.elastic.clients.elasticsearch.tasks.ElasticsearchTasksAsyncClient;
import co.elastic.clients.elasticsearch.text_structure.ElasticsearchTextStructureAsyncClient;
import co.elastic.clients.elasticsearch.transform.ElasticsearchTransformAsyncClient;
import co.elastic.clients.elasticsearch.watcher.ElasticsearchWatcherAsyncClient;
import co.elastic.clients.elasticsearch.xpack.ElasticsearchXpackAsyncClient;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.ElasticsearchTransportConfig;
import co.elastic.clients.transport.Endpoint;
import co.elastic.clients.transport.JsonEndpoint;
import co.elastic.clients.transport.Transport;
import co.elastic.clients.transport.TransportOptions;
import co.elastic.clients.transport.endpoints.BinaryResponse;
import co.elastic.clients.transport.endpoints.BooleanResponse;
import co.elastic.clients.transport.endpoints.EndpointWithResponseMapperAttr;
import co.elastic.clients.util.ObjectBuilder;
import java.lang.reflect.Type;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.annotation.Nullable;
//----------------------------------------------------------------
// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------
/**
* Client for the namespace.
*/
public class ElasticsearchAsyncClient extends ApiClient {
/**
* Creates a client from a {@link ElasticsearchTransportConfig.Default}
* configuration created with an inline lambda expression.
*/
public static ElasticsearchAsyncClient of(
Function<ElasticsearchTransportConfig.Builder, ObjectBuilder<ElasticsearchTransportConfig>> fn) {
return new ElasticsearchAsyncClient(
fn.apply(new ElasticsearchTransportConfig.Builder()).build().buildTransport());
}
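// A minimal construction sketch, kept hedged: the endpoint URL and API key are
// placeholders, not values defined by this class, and assume a reachable cluster.
//
//   ElasticsearchAsyncClient client = ElasticsearchAsyncClient.of(b -> b
//       .host("https://localhost:9200")   // hypothetical endpoint
//       .apiKey("..."));                  // hypothetical credential
//
// Every endpoint method returns a CompletableFuture, so callers compose with
// thenApply/thenAccept, or call join() where blocking is acceptable.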
/**
* Creates a client from an {@link ElasticsearchTransportConfig}.
*/
public ElasticsearchAsyncClient(ElasticsearchTransportConfig config) {
this(config.buildTransport());
}
public ElasticsearchAsyncClient(ElasticsearchTransport transport) {
super(transport, null);
}
public ElasticsearchAsyncClient(ElasticsearchTransport transport, @Nullable TransportOptions transportOptions) {
super(transport, transportOptions);
}
@Override
public ElasticsearchAsyncClient withTransportOptions(@Nullable TransportOptions transportOptions) {
return new ElasticsearchAsyncClient(this.transport, transportOptions);
}
// ----- Child clients
public ElasticsearchAsyncSearchAsyncClient asyncSearch() {
return new ElasticsearchAsyncSearchAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchAutoscalingAsyncClient autoscaling() {
return new ElasticsearchAutoscalingAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchCatAsyncClient cat() {
return new ElasticsearchCatAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchCcrAsyncClient ccr() {
return new ElasticsearchCcrAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchClusterAsyncClient cluster() {
return new ElasticsearchClusterAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchConnectorAsyncClient connector() {
return new ElasticsearchConnectorAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchDanglingIndicesAsyncClient danglingIndices() {
return new ElasticsearchDanglingIndicesAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchEnrichAsyncClient enrich() {
return new ElasticsearchEnrichAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchEqlAsyncClient eql() {
return new ElasticsearchEqlAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchEsqlAsyncClient esql() {
return new ElasticsearchEsqlAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchFeaturesAsyncClient features() {
return new ElasticsearchFeaturesAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchFleetAsyncClient fleet() {
return new ElasticsearchFleetAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchGraphAsyncClient graph() {
return new ElasticsearchGraphAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchIlmAsyncClient ilm() {
return new ElasticsearchIlmAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchIndicesAsyncClient indices() {
return new ElasticsearchIndicesAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchInferenceAsyncClient inference() {
return new ElasticsearchInferenceAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchIngestAsyncClient ingest() {
return new ElasticsearchIngestAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchLicenseAsyncClient license() {
return new ElasticsearchLicenseAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchLogstashAsyncClient logstash() {
return new ElasticsearchLogstashAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchMigrationAsyncClient migration() {
return new ElasticsearchMigrationAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchMlAsyncClient ml() {
return new ElasticsearchMlAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchMonitoringAsyncClient monitoring() {
return new ElasticsearchMonitoringAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchNodesAsyncClient nodes() {
return new ElasticsearchNodesAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchQueryRulesAsyncClient queryRules() {
return new ElasticsearchQueryRulesAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchRollupAsyncClient rollup() {
return new ElasticsearchRollupAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSearchApplicationAsyncClient searchApplication() {
return new ElasticsearchSearchApplicationAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSearchableSnapshotsAsyncClient searchableSnapshots() {
return new ElasticsearchSearchableSnapshotsAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSecurityAsyncClient security() {
return new ElasticsearchSecurityAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchShutdownAsyncClient shutdown() {
return new ElasticsearchShutdownAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSimulateAsyncClient simulate() {
return new ElasticsearchSimulateAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSlmAsyncClient slm() {
return new ElasticsearchSlmAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSnapshotAsyncClient snapshot() {
return new ElasticsearchSnapshotAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSqlAsyncClient sql() {
return new ElasticsearchSqlAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSslAsyncClient ssl() {
return new ElasticsearchSslAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchSynonymsAsyncClient synonyms() {
return new ElasticsearchSynonymsAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchTasksAsyncClient tasks() {
return new ElasticsearchTasksAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchTextStructureAsyncClient textStructure() {
return new ElasticsearchTextStructureAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchTransformAsyncClient transform() {
return new ElasticsearchTransformAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchWatcherAsyncClient watcher() {
return new ElasticsearchWatcherAsyncClient(this.transport, this.transportOptions);
}
public ElasticsearchXpackAsyncClient xpack() {
return new ElasticsearchXpackAsyncClient(this.transport, this.transportOptions);
}
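// All child clients share this client's transport and transport options, so the
// accessors above are cheap to call on demand. An illustrative example (the
// index name is a placeholder):
//
//   client.indices().create(c -> c.index("my-index"));
//   client.cluster().health().thenAccept(h -> System.out.println(h.status()));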
// ----- Endpoint: bulk
/**
* Bulk index or delete documents. Perform multiple <code>index</code>,
* <code>create</code>, <code>delete</code>, and <code>update</code> actions in
* a single request. This reduces overhead and can greatly increase indexing
* speed.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To use the <code>create</code> action, you must have the
* <code>create_doc</code>, <code>create</code>, <code>index</code>, or
* <code>write</code> index privilege. Data streams support only the
* <code>create</code> action.</li>
* <li>To use the <code>index</code> action, you must have the
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To use the <code>delete</code> action, you must have the
* <code>delete</code> or <code>write</code> index privilege.</li>
* <li>To use the <code>update</code> action, you must have the
* <code>index</code> or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with a bulk API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* <li>To make the result of a bulk operation visible to search using the
* <code>refresh</code> parameter, you must have the <code>maintenance</code> or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* The actions are specified in the request body using a newline delimited JSON
* (NDJSON) structure:
*
* <pre>
* <code>action_and_meta_data\n
* optional_source\n
* action_and_meta_data\n
* optional_source\n
* ....
* action_and_meta_data\n
* optional_source\n
* </code>
* </pre>
* <p>
* The <code>index</code> and <code>create</code> actions expect a source on the
* next line and have the same semantics as the <code>op_type</code> parameter
* in the standard index API. A <code>create</code> action fails if a document
* with the same ID already exists in the target. An <code>index</code> action
* adds or replaces a document as necessary.
* <p>
* NOTE: Data streams support only the <code>create</code> action. To update or
* delete a document in a data stream, you must target the backing index
* containing the document.
* <p>
* An <code>update</code> action expects that the partial doc, upsert, and
* script and its options are specified on the next line.
* <p>
* A <code>delete</code> action does not expect a source on the next line and
* has the same semantics as the standard delete API.
* <p>
* NOTE: The final line of data must end with a newline character
* (<code>\n</code>). Each newline character may be preceded by a carriage
* return (<code>\r</code>). When sending NDJSON data to the <code>_bulk</code>
* endpoint, use a <code>Content-Type</code> header of
* <code>application/json</code> or <code>application/x-ndjson</code>. Because
* this format uses literal newline characters (<code>\n</code>) as delimiters,
* make sure that the JSON actions and sources are not pretty printed.
* <p>
* If you provide a target in the request path, it is used for any actions that
* don't explicitly specify an <code>_index</code> argument.
* <p>
* A note on the format: the idea here is to make processing as fast as
* possible. As some of the actions are redirected to other shards on other
* nodes, only <code>action_meta_data</code> is parsed on the receiving node
* side.
* <p>
* Client libraries using this protocol should strive to do something similar on
* the client side, and reduce buffering as much as possible.
* <p>
* There is no &quot;correct&quot; number of actions to perform in a single bulk
* request. Experiment with different settings to find the optimal size for your
* particular workload. Note that Elasticsearch limits the maximum size of an
* HTTP request to 100mb by default so clients must ensure that no request
* exceeds this size. It is not possible to index a single document that exceeds
* the size limit, so you must pre-process any such documents into smaller
* pieces before sending them to Elasticsearch. For instance, split documents
* into pages or chapters before indexing them, or store raw binary data in a
* system outside Elasticsearch and replace the raw data with a link to the
* external system in the documents that you send to Elasticsearch.
* <h5>Client support for bulk requests</h5>
* <p>
* Some of the officially supported clients provide helpers to assist with bulk
* requests and reindexing:
* <ul>
* <li>Go: Check out <code>esutil.BulkIndexer</code></li>
* <li>Perl: Check out <code>Search::Elasticsearch::Client::5_0::Bulk</code> and
* <code>Search::Elasticsearch::Client::5_0::Scroll</code></li>
* <li>Python: Check out <code>elasticsearch.helpers.*</code></li>
* <li>JavaScript: Check out <code>client.helpers.*</code></li>
* <li>.NET: Check out <code>BulkAllObservable</code></li>
* <li>PHP: Check out bulk indexing.</li>
* </ul>
* <h5>Submitting bulk requests with cURL</h5>
* <p>
* If you're providing text file input to <code>curl</code>, you must use the
* <code>--data-binary</code> flag instead of plain <code>-d</code>. The latter
* doesn't preserve newlines. For example:
*
* <pre>
* <code>$ cat requests
* { "index" : { "_index" : "test", "_id" : "1" } }
* { "field1" : "value1" }
* $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
* {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
* </code>
* </pre>
* <h5>Optimistic concurrency control</h5>
* <p>
* Each <code>index</code> and <code>delete</code> action within a bulk API call
* may include the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters in their respective action and meta data lines. The
* <code>if_seq_no</code> and <code>if_primary_term</code> parameters control
* how operations are run, based on the last modification to existing documents.
* See Optimistic concurrency control for more details.
* <h5>Versioning</h5>
* <p>
* Each bulk item can include the version value using the <code>version</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_version</code> mapping. It also supports the
* <code>version_type</code>.
* <h5>Routing</h5>
* <p>
* Each bulk item can include the routing value using the <code>routing</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_routing</code> mapping.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <h5>Wait for active shards</h5>
* <p>
* When making bulk calls, you can set the <code>wait_for_active_shards</code>
* parameter to require a minimum number of shard copies to be active before
* starting to process the bulk request.
* <h5>Refresh</h5>
* <p>
* Control when the changes made by this request are visible to search.
* <p>
* NOTE: Only the shards that receive the bulk request will be affected by
* refresh. Imagine a <code>_bulk?refresh=wait_for</code> request with three
* documents in it that happen to be routed to different shards in an index with
* five shards. The request will only wait for those three shards to refresh.
* The other two shards that make up the index do not participate in the
* <code>_bulk</code> request at all.
* <p>
* You might want to disable the refresh interval temporarily to improve
* indexing throughput for large bulk requests. Refer to the linked
* documentation for step-by-step instructions using the index settings API.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<BulkResponse> bulk(BulkRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<BulkRequest, BulkResponse, ErrorResponse> endpoint = (JsonEndpoint<BulkRequest, BulkResponse, ErrorResponse>) BulkRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Bulk index or delete documents. Perform multiple <code>index</code>,
* <code>create</code>, <code>delete</code>, and <code>update</code> actions in
* a single request. This reduces overhead and can greatly increase indexing
* speed.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To use the <code>create</code> action, you must have the
* <code>create_doc</code>, <code>create</code>, <code>index</code>, or
* <code>write</code> index privilege. Data streams support only the
* <code>create</code> action.</li>
* <li>To use the <code>index</code> action, you must have the
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To use the <code>delete</code> action, you must have the
* <code>delete</code> or <code>write</code> index privilege.</li>
* <li>To use the <code>update</code> action, you must have the
* <code>index</code> or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with a bulk API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* <li>To make the result of a bulk operation visible to search using the
* <code>refresh</code> parameter, you must have the <code>maintenance</code> or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* The actions are specified in the request body using a newline delimited JSON
* (NDJSON) structure:
*
* <pre>
* <code>action_and_meta_data\n
* optional_source\n
* action_and_meta_data\n
* optional_source\n
* ....
* action_and_meta_data\n
* optional_source\n
* </code>
* </pre>
* <p>
* The <code>index</code> and <code>create</code> actions expect a source on the
* next line and have the same semantics as the <code>op_type</code> parameter
* in the standard index API. A <code>create</code> action fails if a document
* with the same ID already exists in the target. An <code>index</code> action
* adds or replaces a document as necessary.
* <p>
* NOTE: Data streams support only the <code>create</code> action. To update or
* delete a document in a data stream, you must target the backing index
* containing the document.
* <p>
* An <code>update</code> action expects that the partial doc, upsert, and
* script and its options are specified on the next line.
* <p>
* A <code>delete</code> action does not expect a source on the next line and
* has the same semantics as the standard delete API.
* <p>
* NOTE: The final line of data must end with a newline character
* (<code>\n</code>). Each newline character may be preceded by a carriage
* return (<code>\r</code>). When sending NDJSON data to the <code>_bulk</code>
* endpoint, use a <code>Content-Type</code> header of
* <code>application/json</code> or <code>application/x-ndjson</code>. Because
* this format uses literal newline characters (<code>\n</code>) as delimiters,
* make sure that the JSON actions and sources are not pretty printed.
* <p>
* If you provide a target in the request path, it is used for any actions that
* don't explicitly specify an <code>_index</code> argument.
* <p>
* A note on the format: the idea here is to make processing as fast as
* possible. As some of the actions are redirected to other shards on other
* nodes, only <code>action_meta_data</code> is parsed on the receiving node
* side.
* <p>
* Client libraries using this protocol should strive to do something similar on
* the client side, and reduce buffering as much as possible.
* <p>
* There is no &quot;correct&quot; number of actions to perform in a single bulk
* request. Experiment with different settings to find the optimal size for your
* particular workload. Note that Elasticsearch limits the maximum size of an
* HTTP request to 100mb by default so clients must ensure that no request
* exceeds this size. It is not possible to index a single document that exceeds
* the size limit, so you must pre-process any such documents into smaller
* pieces before sending them to Elasticsearch. For instance, split documents
* into pages or chapters before indexing them, or store raw binary data in a
* system outside Elasticsearch and replace the raw data with a link to the
* external system in the documents that you send to Elasticsearch.
* <h5>Client support for bulk requests</h5>
* <p>
* Some of the officially supported clients provide helpers to assist with bulk
* requests and reindexing:
* <ul>
* <li>Go: Check out <code>esutil.BulkIndexer</code></li>
* <li>Perl: Check out <code>Search::Elasticsearch::Client::5_0::Bulk</code> and
* <code>Search::Elasticsearch::Client::5_0::Scroll</code></li>
* <li>Python: Check out <code>elasticsearch.helpers.*</code></li>
* <li>JavaScript: Check out <code>client.helpers.*</code></li>
* <li>.NET: Check out <code>BulkAllObservable</code></li>
* <li>PHP: Check out bulk indexing.</li>
* </ul>
* <h5>Submitting bulk requests with cURL</h5>
* <p>
* If you're providing text file input to <code>curl</code>, you must use the
* <code>--data-binary</code> flag instead of plain <code>-d</code>. The latter
* doesn't preserve newlines. For example:
*
* <pre>
* <code>$ cat requests
* { "index" : { "_index" : "test", "_id" : "1" } }
* { "field1" : "value1" }
* $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
* {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
* </code>
* </pre>
* <h5>Optimistic concurrency control</h5>
* <p>
* Each <code>index</code> and <code>delete</code> action within a bulk API call
* may include the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters in their respective action and meta data lines. The
* <code>if_seq_no</code> and <code>if_primary_term</code> parameters control
* how operations are run, based on the last modification to existing documents.
* See Optimistic concurrency control for more details.
* <h5>Versioning</h5>
* <p>
* Each bulk item can include the version value using the <code>version</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_version</code> mapping. It also supports the
* <code>version_type</code>.
* <h5>Routing</h5>
* <p>
* Each bulk item can include the routing value using the <code>routing</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_routing</code> mapping.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <h5>Wait for active shards</h5>
* <p>
* When making bulk calls, you can set the <code>wait_for_active_shards</code>
* parameter to require a minimum number of shard copies to be active before
* starting to process the bulk request.
* <h5>Refresh</h5>
* <p>
* Control when the changes made by this request are visible to search.
* <p>
* NOTE: Only the shards that receive the bulk request will be affected by
* refresh. Imagine a <code>_bulk?refresh=wait_for</code> request with three
* documents in it that happen to be routed to different shards in an index with
* five shards. The request will only wait for those three shards to refresh.
* The other two shards that make up the index do not participate in the
* <code>_bulk</code> request at all.
* <p>
* You might want to disable the refresh interval temporarily to improve
* indexing throughput for large bulk requests. Refer to the linked
* documentation for step-by-step instructions using the index settings API.
*
* @param fn
*            a function that initializes a builder to create the
*            {@link BulkRequest}
* @see Documentation on elastic.co
*/
public final CompletableFuture<BulkResponse> bulk(
Function<BulkRequest.Builder, ObjectBuilder<BulkRequest>> fn) {
return bulk(fn.apply(new BulkRequest.Builder()).build());
}
/**
* Bulk index or delete documents. Perform multiple <code>index</code>,
* <code>create</code>, <code>delete</code>, and <code>update</code> actions in
* a single request. This reduces overhead and can greatly increase indexing
* speed.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To use the <code>create</code> action, you must have the
* <code>create_doc</code>, <code>create</code>, <code>index</code>, or
* <code>write</code> index privilege. Data streams support only the
* <code>create</code> action.</li>
* <li>To use the <code>index</code> action, you must have the
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To use the <code>delete</code> action, you must have the
* <code>delete</code> or <code>write</code> index privilege.</li>
* <li>To use the <code>update</code> action, you must have the
* <code>index</code> or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with a bulk API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* <li>To make the result of a bulk operation visible to search using the
* <code>refresh</code> parameter, you must have the <code>maintenance</code> or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* The actions are specified in the request body using a newline delimited JSON
* (NDJSON) structure:
*
* <pre>
* <code>action_and_meta_data\n
* optional_source\n
* action_and_meta_data\n
* optional_source\n
* ....
* action_and_meta_data\n
* optional_source\n
* </code>
* </pre>
* <p>
* The <code>index</code> and <code>create</code> actions expect a source on the
* next line and have the same semantics as the <code>op_type</code> parameter
* in the standard index API. A <code>create</code> action fails if a document
* with the same ID already exists in the target. An <code>index</code> action
* adds or replaces a document as necessary.
* <p>
* NOTE: Data streams support only the <code>create</code> action. To update or
* delete a document in a data stream, you must target the backing index
* containing the document.
* <p>
* An <code>update</code> action expects that the partial doc, upsert, and
* script and its options are specified on the next line.
* <p>
* A <code>delete</code> action does not expect a source on the next line and
* has the same semantics as the standard delete API.
* <p>
* NOTE: The final line of data must end with a newline character
* (<code>\n</code>). Each newline character may be preceded by a carriage
* return (<code>\r</code>). When sending NDJSON data to the <code>_bulk</code>
* endpoint, use a <code>Content-Type</code> header of
* <code>application/json</code> or <code>application/x-ndjson</code>. Because
* this format uses literal newline characters (<code>\n</code>) as delimiters,
* make sure that the JSON actions and sources are not pretty printed.
* <p>
* If you provide a target in the request path, it is used for any actions that
* don't explicitly specify an <code>_index</code> argument.
* <p>
* A note on the format: the idea here is to make processing as fast as
* possible. As some of the actions are redirected to other shards on other
* nodes, only <code>action_meta_data</code> is parsed on the receiving node
* side.
* <p>
* Client libraries using this protocol should strive to do something similar on
* the client side, and reduce buffering as much as possible.
* <p>
* There is no &quot;correct&quot; number of actions to perform in a single bulk
* request. Experiment with different settings to find the optimal size for your
* particular workload. Note that Elasticsearch limits the maximum size of an
* HTTP request to 100mb by default so clients must ensure that no request
* exceeds this size. It is not possible to index a single document that exceeds
* the size limit, so you must pre-process any such documents into smaller
* pieces before sending them to Elasticsearch. For instance, split documents
* into pages or chapters before indexing them, or store raw binary data in a
* system outside Elasticsearch and replace the raw data with a link to the
* external system in the documents that you send to Elasticsearch.
* <h5>Client support for bulk requests</h5>
* <p>
* Some of the officially supported clients provide helpers to assist with bulk
* requests and reindexing:
* <ul>
* <li>Go: Check out <code>esutil.BulkIndexer</code></li>
* <li>Perl: Check out <code>Search::Elasticsearch::Client::5_0::Bulk</code> and
* <code>Search::Elasticsearch::Client::5_0::Scroll</code></li>
* <li>Python: Check out <code>elasticsearch.helpers.*</code></li>
* <li>JavaScript: Check out <code>client.helpers.*</code></li>
* <li>.NET: Check out <code>BulkAllObservable</code></li>
* <li>PHP: Check out bulk indexing.</li>
* </ul>
* <h5>Submitting bulk requests with cURL</h5>
* <p>
* If you're providing text file input to <code>curl</code>, you must use the
* <code>--data-binary</code> flag instead of plain <code>-d</code>. The latter
* doesn't preserve newlines. For example:
*
* <pre>
* <code>$ cat requests
* { "index" : { "_index" : "test", "_id" : "1" } }
* { "field1" : "value1" }
* $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
* {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
* </code>
* </pre>
* <h5>Optimistic concurrency control</h5>
* <p>
* Each <code>index</code> and <code>delete</code> action within a bulk API call
* may include the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters in their respective action and meta data lines. The
* <code>if_seq_no</code> and <code>if_primary_term</code> parameters control
* how operations are run, based on the last modification to existing documents.
* See Optimistic concurrency control for more details.
* <h5>Versioning</h5>
* <p>
* Each bulk item can include the version value using the <code>version</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_version</code> mapping. It also supports the
* <code>version_type</code>.
* <h5>Routing</h5>
* <p>
* Each bulk item can include the routing value using the <code>routing</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_routing</code> mapping.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <h5>Wait for active shards</h5>
* <p>
* When making bulk calls, you can set the <code>wait_for_active_shards</code>
* parameter to require a minimum number of shard copies to be active before
* starting to process the bulk request.
* <h5>Refresh</h5>
* <p>
* Control when the changes made by this request are visible to search.
* <p>
* NOTE: Only the shards that receive the bulk request will be affected by
* refresh. Imagine a <code>_bulk?refresh=wait_for</code> request with three
* documents in it that happen to be routed to different shards in an index with
* five shards. The request will only wait for those three shards to refresh.
* The other two shards that make up the index do not participate in the
* <code>_bulk</code> request at all.
* <p>
* You might want to disable the refresh interval temporarily to improve
* indexing throughput for large bulk requests. Refer to the linked
* documentation for step-by-step instructions using the index settings API.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<BulkResponse> bulk() {
return this.transport.performRequestAsync(new BulkRequest.Builder().build(), BulkRequest._ENDPOINT,
this.transportOptions);
}
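// A hedged sketch of the lambda overload above; the index name and Product
// record are placeholders, not part of this client:
//
//   record Product(String sku, double price) {}
//
//   client.bulk(b -> b
//           .index("products")
//           .operations(op -> op.index(i -> i.id("1").document(new Product("sku-1", 9.99))))
//           .operations(op -> op.index(i -> i.id("2").document(new Product("sku-2", 19.99))))
//       ).thenAccept(r -> System.out.println("errors: " + r.errors()));
//
// Repeated operations(...) calls append to the operation list, so the builder
// can also be filled in a loop over a collection of documents.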
// ----- Endpoint: clear_scroll
/**
* Clear a scrolling search. Clear the search context and results for a
* scrolling search.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<ClearScrollRequest, ClearScrollResponse, ErrorResponse> endpoint = (JsonEndpoint<ClearScrollRequest, ClearScrollResponse, ErrorResponse>) ClearScrollRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Clear a scrolling search. Clear the search context and results for a
* scrolling search.
*
* @param fn
* a function that initializes a builder to create the
* {@link ClearScrollRequest}
* @see Documentation on elastic.co
*/
public final CompletableFuture<ClearScrollResponse> clearScroll(
Function<ClearScrollRequest.Builder, ObjectBuilder<ClearScrollRequest>> fn) {
return clearScroll(fn.apply(new ClearScrollRequest.Builder()).build());
}
/**
* Clear a scrolling search. Clear the search context and results for a
* scrolling search.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<ClearScrollResponse> clearScroll() {
return this.transport.performRequestAsync(new ClearScrollRequest.Builder().build(),
ClearScrollRequest._ENDPOINT, this.transportOptions);
}
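// Illustrative usage: release a server-side scroll context once paging is
// done. The scrollId value is a placeholder returned by an earlier scrolling
// search.
//
//   client.clearScroll(c -> c.scrollId(scrollId));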
// ----- Endpoint: close_point_in_time
/**
* Close a point in time. A point in time must be opened explicitly before being
* used in search requests. The <code>keep_alive</code> parameter tells
* Elasticsearch how long it should persist. A point in time is automatically
* closed when the <code>keep_alive</code> period has elapsed. However, keeping
* points in time has a cost; close them as soon as they are no longer required
* for search requests.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<ClosePointInTimeResponse> closePointInTime(ClosePointInTimeRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<ClosePointInTimeRequest, ClosePointInTimeResponse, ErrorResponse> endpoint = (JsonEndpoint<ClosePointInTimeRequest, ClosePointInTimeResponse, ErrorResponse>) ClosePointInTimeRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Close a point in time. A point in time must be opened explicitly before being
* used in search requests. The <code>keep_alive</code> parameter tells
* Elasticsearch how long it should persist. A point in time is automatically
* closed when the <code>keep_alive</code> period has elapsed. However, keeping
* points in time has a cost; close them as soon as they are no longer required
* for search requests.
*
* @param fn
* a function that initializes a builder to create the
* {@link ClosePointInTimeRequest}
* @see Documentation on elastic.co
*/
public final CompletableFuture<ClosePointInTimeResponse> closePointInTime(
Function<ClosePointInTimeRequest.Builder, ObjectBuilder<ClosePointInTimeRequest>> fn) {
return closePointInTime(fn.apply(new ClosePointInTimeRequest.Builder()).build());
}
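// Illustrative usage: close a point in time once it is no longer needed. The
// pitId value is a placeholder returned by an earlier openPointInTime() call.
//
//   client.closePointInTime(c -> c.id(pitId))
//       .thenAccept(r -> System.out.println(r.succeeded()));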
// ----- Endpoint: count
/**
* Count search results. Get the number of documents matching a query.
*
* The query can be provided either by using a simple query string as a
* parameter, or by defining Query DSL within the request body. The query is
* optional. When no query is provided, the API uses <code>match_all</code> to
* count all the documents.
*
* The count API supports multi-target syntax. You can run a single count API
* search across multiple data streams and indices.
*
* The operation is broadcast across all shards. For each shard ID group, a
* replica is chosen and the search is run against it. This means that replicas
* increase the scalability of the count.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<CountResponse> count(CountRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<CountRequest, CountResponse, ErrorResponse> endpoint = (JsonEndpoint<CountRequest, CountResponse, ErrorResponse>) CountRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Count search results. Get the number of documents matching a query.
*
* The query can be provided either by using a simple query string as a
* parameter, or by defining Query DSL within the request body. The query is
* optional. When no query is provided, the API uses <code>match_all</code> to
* count all the documents.
*
* The count API supports multi-target syntax. You can run a single count API
* search across multiple data streams and indices.
*
* The operation is broadcast across all shards. For each shard ID group, a
* replica is chosen and the search is run against it. This means that replicas
* increase the scalability of the count.
*
* @param fn
* a function that initializes a builder to create the
* {@link CountRequest}
* @see Documentation on elastic.co
*/
public final CompletableFuture<CountResponse> count(
Function<CountRequest.Builder, ObjectBuilder<CountRequest>> fn) {
return count(fn.apply(new CountRequest.Builder()).build());
}
/**
* Count search results. Get the number of documents matching a query.
*
* The query can be provided either by using a simple query string as a
* parameter, or by defining Query DSL within the request body. The query is
* optional. When no query is provided, the API uses <code>match_all</code> to
* count all the documents.
*
* The count API supports multi-target syntax. You can run a single count API
* search across multiple data streams and indices.
*
* The operation is broadcast across all shards. For each shard ID group, a
* replica is chosen and the search is run against it. This means that replicas
* increase the scalability of the count.
*
* @see Documentation on elastic.co
*/
public CompletableFuture<CountResponse> count() {
return this.transport.performRequestAsync(new CountRequest.Builder().build(), CountRequest._ENDPOINT,
this.transportOptions);
}
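// A hedged sketch of the lambda overload; the index and field names are
// placeholders:
//
//   client.count(c -> c
//           .index("products")
//           .query(q -> q.term(t -> t.field("sku").value("sku-1"))))
//       .thenAccept(r -> System.out.println(r.count()));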
// ----- Endpoint: create
/**
* Create a new document in the index.
* <p>
* You can index a new JSON document with the
* <code>/&lt;target&gt;/_doc/</code> or
* <code>/&lt;target&gt;/_create/&lt;_id&gt;</code> APIs. Using
* <code>_create</code> guarantees that the document is indexed only if it does
* not already exist. It returns a 409 response when a document with the same ID
* already exists in the index. To update an existing document, you must use the
* <code>/&lt;target&gt;/_doc/</code> API.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To add a document using the
* <code>PUT /&lt;target&gt;/_create/&lt;_id&gt;</code> or
* <code>POST /&lt;target&gt;/_create/&lt;_id&gt;</code> request formats, you
* must have the <code>create_doc</code>, <code>create</code>,
* <code>index</code>, or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with this API request, you
* must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <h5>Automatically create data streams and indices</h5>
* <p>
* If the request's target doesn't exist and matches an index template with a
* <code>data_stream</code> definition, the index operation automatically
* creates the data stream.
* <p>
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
* <p>
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
* <p>
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
* <p>
* Automatic index creation is controlled by the
* <code>action.auto_create_index</code> setting. If it is <code>true</code>,
* any index can be created automatically. You can modify this setting to
* explicitly allow or block automatic creation of indices that match specified
* patterns or set it to <code>false</code> to turn off automatic index creation
* entirely. Specify a comma-separated list of patterns you want to allow or
* prefix each pattern with <code>+</code> or <code>-</code> to indicate whether
* it should be allowed or blocked. When a list is specified, the default
* behaviour is to disallow.
* <p>
* NOTE: The <code>action.auto_create_index</code> setting affects the automatic
* creation of indices only. It does not affect the creation of data streams.
* <h5>Routing</h5>
* <p>
* By default, shard placement (or routing) is controlled by using a hash of the
* document's ID value. For more explicit control, the value fed into the hash
* function used by the router can be directly specified on a per-operation
* basis using the <code>routing</code> parameter.
* <p>
* When setting up explicit mapping, you can also use the <code>_routing</code>
* field to direct the index operation to extract the routing value from the
* document itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the <code>_routing</code> mapping is defined and
* set to be required, the index operation will fail if no routing value is
* provided or extracted.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <h5>Distributed</h5>
* <p>
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
* <h5>Active shards</h5>
* <p>
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say <code>wait_for_active_shards</code> is
* <code>1</code>). This default can be overridden in the index settings
* dynamically by setting <code>index.write.wait_for_active_shards</code>. To
* alter this behavior per operation, use the
* <code>wait_for_active_shards</code> request parameter.
* <p>
* Valid values are <code>all</code> or any positive integer up to the total
* number of configured copies per shard in the index (which is
* <code>number_of_replicas</code>+1). Specifying a negative value or a number
* greater than the number of shard copies will throw an error.
* <p>
* For example, suppose you have a cluster of three nodes, A, B, and C and you
* create an index with the number of replicas set to 3 (resulting in 4 shard
* copies, one more copy than there are nodes). If you attempt an indexing
* operation, by default the operation will only ensure the primary copy of each
* shard is available before proceeding. This means that even if B and C went
* down and A hosted the primary shard copies, the indexing operation would
* still proceed with only one copy of the data. If
* <code>wait_for_active_shards</code> is set on the request to <code>3</code>
* (and all three nodes are up), the indexing operation will require 3 active
* shard copies before proceeding. This requirement should be met because there
* are 3 active nodes in the cluster, each one holding a copy of the shard.
* However, if you set <code>wait_for_active_shards</code> to <code>all</code>
* (or to <code>4</code>, which is the same in this situation), the indexing
* operation will not proceed as you do not have all 4 copies of each shard
* active in the index. The operation will time out unless a new node is brought
* up in the cluster to host the fourth copy of the shard.
* <p>
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The <code>_shards</code> section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
*
* @see Documentation on elastic.co
*/
public <TDocument> CompletableFuture<CreateResponse> create(CreateRequest<TDocument> request) {
@SuppressWarnings("unchecked")
JsonEndpoint<CreateRequest<?>, CreateResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateRequest<?>, CreateResponse, ErrorResponse>) CreateRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Create a new document in the index.
* <p>
* You can index a new JSON document with the
* <code>/&lt;target&gt;/_doc/</code> or
* <code>/&lt;target&gt;/_create/&lt;_id&gt;</code> APIs. Using
* <code>_create</code> guarantees that the document is indexed only if it does
* not already exist. It returns a 409 response when a document with the same ID
* already exists in the index. To update an existing document, you must use the
* <code>/&lt;target&gt;/_doc/</code> API.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To add a document using the
* <code>PUT /&lt;target&gt;/_create/&lt;_id&gt;</code> or
* <code>POST /&lt;target&gt;/_create/&lt;_id&gt;</code> request formats, you
* must have the <code>create_doc</code>, <code>create</code>,
* <code>index</code>, or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with this API request, you
* must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <h5>Automatically create data streams and indices</h5>
* <p>
* If the request's target doesn't exist and matches an index template with a
* <code>data_stream</code> definition, the index operation automatically
* creates the data stream.
* <p>
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
* <p>
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
* <p>
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
* <p>
* Automatic index creation is controlled by the
* <code>action.auto_create_index</code> setting. If it is <code>true</code>,
* any index can be created automatically. You can modify this setting to
* explicitly allow or block automatic creation of indices that match specified
* patterns or set it to <code>false</code> to turn off automatic index creation
* entirely. Specify a comma-separated list of patterns you want to allow or
* prefix each pattern with <code>+</code> or <code>-</code> to indicate whether
* it should be allowed or blocked. When a list is specified, the default
* behaviour is to disallow.
* <p>
* NOTE: The <code>action.auto_create_index</code> setting affects the automatic
* creation of indices only. It does not affect the creation of data streams.
* <h5>Routing</h5>
* <p>
* By default, shard placement (or routing) is controlled by using a hash of the
* document's ID value. For more explicit control, the value fed into the hash
* function used by the router can be directly specified on a per-operation
* basis using the <code>routing</code> parameter.
* <p>
* When setting up explicit mapping, you can also use the <code>_routing</code>
* field to direct the index operation to extract the routing value from the
* document itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the <code>_routing</code> mapping is defined and
* set to be required, the index operation will fail if no routing value is
* provided or extracted.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <h5>Distributed</h5>
* <p>
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
* <h5>Active shards</h5>
* <p>
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say <code>wait_for_active_shards</code> is
* <code>1</code>). This default can be overridden in the index settings
* dynamically by setting <code>index.write.wait_for_active_shards</code>. To
* alter this behavior per operation, use the
* <code>wait_for_active_shards</code> request parameter.
* <p>
* Valid values are <code>all</code> or any positive integer up to the total
* number of configured copies per shard in the index (which is
* <code>number_of_replicas</code>+1). Specifying a negative value or a number
* greater than the number of shard copies will throw an error.
* <p>
* For example, suppose you have a cluster of three nodes, A, B, and C and you
* create an index with the number of replicas set to 3 (resulting in 4 shard
* copies, one more copy than there are nodes). If you attempt an indexing
* operation, by default the operation will only ensure the primary copy of each
* shard is available before proceeding. This means that even if B and C went
* down and A hosted the primary shard copies, the indexing operation would
* still proceed with only one copy of the data. If
* <code>wait_for_active_shards</code> is set on the request to <code>3</code>
* (and all three nodes are up), the indexing operation will require 3 active
* shard copies before proceeding. This requirement should be met because there
* are 3 active nodes in the cluster, each one holding a copy of the shard.
* However, if you set <code>wait_for_active_shards</code> to <code>all</code>
* (or to <code>4</code>, which is the same in this situation), the indexing
* operation will not proceed as you do not have all 4 copies of each shard
* active in the index. The operation will time out unless a new node is brought
* up in the cluster to host the fourth copy of the shard.
* <p>
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The <code>_shards</code> section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
*
* @param fn
*            a function that initializes a builder to create the
*            {@link CreateRequest}
* @see Documentation on elastic.co
*/
public final <TDocument> CompletableFuture<CreateResponse> create(
Function<CreateRequest.Builder<TDocument>, ObjectBuilder<CreateRequest<TDocument>>> fn) {
return create(fn.apply(new CreateRequest.Builder()).build());
}
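// A hedged sketch: create fails with a 409 if the ID already exists, which is
// what distinguishes it from plain indexing. The names below (and the Product
// record from the bulk sketch) are placeholders:
//
//   client.create(c -> c
//           .index("products")
//           .id("sku-1")
//           .document(new Product("sku-1", 9.99)))
//       .thenAccept(r -> System.out.println(r.result()));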
// ----- Endpoint: delete
/**
* Delete a document.
*
* Remove a JSON document from the specified index.
*
* NOTE: You cannot send deletion requests directly to a data stream. To delete
* a document in a data stream, you must target the backing index containing the
* document.
*
* Optimistic concurrency control
*
* Delete operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters. If a mismatch is detected, the operation will result in a
* <code>VersionConflictException</code> and a status code of <code>409</code>.
*
* Versioning
*
* Each document indexed is versioned. When deleting a document, the version can
* be specified to make sure the relevant document you are trying to delete is
* actually being deleted and it has not changed in the meantime. Every write
* operation run on a document, deletes included, causes its version to be
* incremented. The version number of a deleted document remains available for a
* short time after deletion to allow for control of concurrent operations. The
* length of time for which a deleted document's version remains available is
* determined by the <code>index.gc_deletes</code> index setting.
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to delete a document.
*
* If the <code>_routing</code> mapping is set to <code>required</code> and no
* routing value is specified, the delete API throws a
* <code>RoutingMissingException</code> and rejects the request.
*
* For example:
*
*
* <pre>
* DELETE /my-index-000001/_doc/1?routing=shard-1
* </pre>
*
*
*
* This request deletes the document with ID 1, routed using the value
* shard-1. The document is not deleted if the correct routing is not
* specified.
*
* Distributed
*
* The delete operation gets hashed into a specific shard ID. It then gets
* redirected into the primary shard within that ID group and replicated (if
* needed) to shard replicas within that ID group.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<DeleteResponse> delete(DeleteRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteRequest, DeleteResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteRequest, DeleteResponse, ErrorResponse>) DeleteRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete a document.
*
* Remove a JSON document from the specified index.
*
* NOTE: You cannot send deletion requests directly to a data stream. To delete
* a document in a data stream, you must target the backing index containing the
* document.
*
* Optimistic concurrency control
*
* Delete operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters. If a mismatch is detected, the operation will result in a
* <code>VersionConflictException</code> and a status code of <code>409</code>.
*
* Versioning
*
* Each document indexed is versioned. When deleting a document, the version can
* be specified to make sure the relevant document you are trying to delete is
* actually being deleted and it has not changed in the meantime. Every write
* operation run on a document, deletes included, causes its version to be
* incremented. The version number of a deleted document remains available for a
* short time after deletion to allow for control of concurrent operations. The
* length of time for which a deleted document's version remains available is
* determined by the <code>index.gc_deletes</code> index setting.
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to delete a document.
*
* If the <code>_routing</code> mapping is set to <code>required</code> and no
* routing value is specified, the delete API throws a
* <code>RoutingMissingException</code> and rejects the request.
*
* For example:
*
*
* <pre>
* DELETE /my-index-000001/_doc/1?routing=shard-1
* </pre>
*
*
*
* This request deletes the document with ID 1, routed using the value
* shard-1. The document is not deleted if the correct routing is not
* specified.
*
* Distributed
*
* The delete operation gets hashed into a specific shard ID. It then gets
* redirected into the primary shard within that ID group and replicated (if
* needed) to shard replicas within that ID group.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<DeleteResponse> delete(
Function<DeleteRequest.Builder, ObjectBuilder<DeleteRequest>> fn) {
return delete(fn.apply(new DeleteRequest.Builder()).build());
}
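// Illustrative usage sketch, not part of the generated client: a conditional
// delete using the optimistic concurrency control described above. The
// "client" instance and the seq-no/primary-term values are hypothetical.
//
// CompletableFuture<DeleteResponse> future = client.delete(d -> d
//         .index("my-index-000001")
//         .id("1")
//         .ifSeqNo(5L)         // sequence number from a previous read
//         .ifPrimaryTerm(1L)); // primary term from the same read
// future.thenAccept(resp -> System.out.println("Result: " + resp.result()));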
// ----- Endpoint: delete_by_query
/**
* Delete documents.
*
* Deletes documents that match the specified query.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or alias:
*
* <ul>
* <li><code>read</code></li>
* <li><code>delete</code> or <code>write</code></li>
* </ul>
*
*
* You can specify the query criteria in the request URI or the request body
* using the same syntax as the search API. When you submit a delete by query
* request, Elasticsearch gets a snapshot of the data stream or index when it
* begins processing the request and deletes matching documents using internal
* versioning. If a document changes between the time that the snapshot is taken
* and the delete operation is processed, it results in a version conflict and
* the delete operation fails.
*
* NOTE: Documents with a version equal to 0 cannot be deleted using delete by
* query because internal versioning does not support 0 as a valid version
* number.
*
* While processing a delete by query request, Elasticsearch performs multiple
* search requests sequentially to find all of the matching documents to delete.
* A bulk delete request is performed for each batch of matching documents. If a
* search or bulk request is rejected, the requests are retried up to 10 times,
* with exponential back off. If the maximum retry limit is reached, processing
* halts and all failed requests are returned in the response. Any delete
* requests that completed successfully still stick; they are not rolled back.
*
* You can opt to count version conflicts instead of halting and returning by
* setting <code>conflicts</code> to <code>proceed</code>. Note that if you opt
* to count version conflicts the operation could attempt to delete more
* documents from the source than <code>max_docs</code> until it has
* successfully deleted <code>max_docs</code> documents, or it has gone through
* every document in the source query.
*
* Throttling delete requests
*
* To control the rate at which delete by query issues batches of delete
* operations, you can set <code>requests_per_second</code> to any positive
* decimal number. This pads each batch with a wait time to throttle the rate.
* Set <code>requests_per_second</code> to <code>-1</code> to disable
* throttling.
*
* Throttling uses a wait time between batches so that the internal scroll
* requests can be given a timeout that takes the request padding into account.
* The padding time is the difference between the batch size divided by the
* <code>requests_per_second</code> and the time spent writing. By default the
* batch size is <code>1000</code>, so if <code>requests_per_second</code> is
* set to <code>500</code>:
*
*
* <pre>
* target_time = 1000 / 500 per second = 2 seconds
* wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
* </pre>
*
*
*
* Since the batch is issued as a single <code>_bulk</code> request, large batch
* sizes cause Elasticsearch to create many requests and wait before starting
* the next set. This is "bursty" instead of "smooth".
*
* Slicing
*
* Delete by query supports sliced scroll to parallelize the delete process.
* This can improve efficiency and provide a convenient way to break the request
* down into smaller parts.
*
* Setting <code>slices</code> to <code>auto</code> lets Elasticsearch choose
* the number of slices to use. This setting will use one slice per shard, up to
* a certain limit. If there are multiple source data streams or indices, it
* will choose the number of slices based on the index or backing index with the
* smallest number of shards. Adding slices to the delete by query operation
* creates sub-requests which means it has some quirks:
*
* <ul>
* <li>You can see these requests in the tasks APIs. These sub-requests are
* "child" tasks of the task for the request with slices.</li>
* <li>Fetching the status of the task for the request with slices only contains
* the status of completed slices.</li>
* <li>These sub-requests are individually addressable for things like
* cancellation and rethrottling.</li>
* <li>Rethrottling the request with <code>slices</code> will rethrottle the
* unfinished sub-request proportionally.</li>
* <li>Canceling the request with <code>slices</code> will cancel each
* sub-request.</li>
* <li>Due to the nature of <code>slices</code>, each sub-request won't get a
* perfectly even portion of the documents. All documents will be addressed, but
* some slices may be larger than others. Expect larger slices to have a more
* even distribution.</li>
* <li>Parameters like <code>requests_per_second</code> and
* <code>max_docs</code> on a request with <code>slices</code> are distributed
* proportionally to each sub-request. Combine that with the earlier point about
* distribution being uneven and you should conclude that using
* <code>max_docs</code> with <code>slices</code> might not result in exactly
* <code>max_docs</code> documents being deleted.</li>
* <li>Each sub-request gets a slightly different snapshot of the source data
* stream or index though these are all taken at approximately the same
* time.</li>
* </ul>
*
*
* If you're slicing manually or otherwise tuning automatic slicing, keep in
* mind that:
*
* <ul>
* <li>Query performance is most efficient when the number of slices is equal to
* the number of shards in the index or backing index. If that number is large
* (for example, 500), choose a lower number as too many <code>slices</code>
* hurts performance. Setting <code>slices</code> higher than the number of
* shards generally does not improve efficiency and adds overhead.</li>
* <li>Delete performance scales linearly across available resources with the
* number of slices.</li>
* </ul>
*
*
* Whether query or delete performance dominates the runtime depends on the
* documents being deleted and cluster resources.
*
* Cancel a delete by query operation
*
* Any delete by query can be canceled using the task cancel API. For example:
*
*
* <pre>
* POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
* </pre>
*
*
*
* The task ID can be found by using the get tasks API.
*
* Cancellation should happen quickly but might take a few seconds. The get task
* status API will continue to list the delete by query task until this task
* checks that it has been cancelled and terminates itself.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteByQueryRequest, DeleteByQueryResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteByQueryRequest, DeleteByQueryResponse, ErrorResponse>) DeleteByQueryRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete documents.
*
* Deletes documents that match the specified query.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or alias:
*
* <ul>
* <li><code>read</code></li>
* <li><code>delete</code> or <code>write</code></li>
* </ul>
*
*
* You can specify the query criteria in the request URI or the request body
* using the same syntax as the search API. When you submit a delete by query
* request, Elasticsearch gets a snapshot of the data stream or index when it
* begins processing the request and deletes matching documents using internal
* versioning. If a document changes between the time that the snapshot is taken
* and the delete operation is processed, it results in a version conflict and
* the delete operation fails.
*
* NOTE: Documents with a version equal to 0 cannot be deleted using delete by
* query because internal versioning does not support 0 as a valid version
* number.
*
* While processing a delete by query request, Elasticsearch performs multiple
* search requests sequentially to find all of the matching documents to delete.
* A bulk delete request is performed for each batch of matching documents. If a
* search or bulk request is rejected, the requests are retried up to 10 times,
* with exponential back off. If the maximum retry limit is reached, processing
* halts and all failed requests are returned in the response. Any delete
* requests that completed successfully still stick; they are not rolled back.
*
* You can opt to count version conflicts instead of halting and returning by
* setting <code>conflicts</code> to <code>proceed</code>. Note that if you opt
* to count version conflicts the operation could attempt to delete more
* documents from the source than <code>max_docs</code> until it has
* successfully deleted <code>max_docs</code> documents, or it has gone through
* every document in the source query.
*
* Throttling delete requests
*
* To control the rate at which delete by query issues batches of delete
* operations, you can set <code>requests_per_second</code> to any positive
* decimal number. This pads each batch with a wait time to throttle the rate.
* Set <code>requests_per_second</code> to <code>-1</code> to disable
* throttling.
*
* Throttling uses a wait time between batches so that the internal scroll
* requests can be given a timeout that takes the request padding into account.
* The padding time is the difference between the batch size divided by the
* <code>requests_per_second</code> and the time spent writing. By default the
* batch size is <code>1000</code>, so if <code>requests_per_second</code> is
* set to <code>500</code>:
*
*
* <pre>
* target_time = 1000 / 500 per second = 2 seconds
* wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
* </pre>
*
*
*
* Since the batch is issued as a single <code>_bulk</code> request, large batch
* sizes cause Elasticsearch to create many requests and wait before starting
* the next set. This is "bursty" instead of "smooth".
*
* Slicing
*
* Delete by query supports sliced scroll to parallelize the delete process.
* This can improve efficiency and provide a convenient way to break the request
* down into smaller parts.
*
* Setting <code>slices</code> to <code>auto</code> lets Elasticsearch choose
* the number of slices to use. This setting will use one slice per shard, up to
* a certain limit. If there are multiple source data streams or indices, it
* will choose the number of slices based on the index or backing index with the
* smallest number of shards. Adding slices to the delete by query operation
* creates sub-requests which means it has some quirks:
*
* <ul>
* <li>You can see these requests in the tasks APIs. These sub-requests are
* "child" tasks of the task for the request with slices.</li>
* <li>Fetching the status of the task for the request with slices only contains
* the status of completed slices.</li>
* <li>These sub-requests are individually addressable for things like
* cancellation and rethrottling.</li>
* <li>Rethrottling the request with <code>slices</code> will rethrottle the
* unfinished sub-request proportionally.</li>
* <li>Canceling the request with <code>slices</code> will cancel each
* sub-request.</li>
* <li>Due to the nature of <code>slices</code>, each sub-request won't get a
* perfectly even portion of the documents. All documents will be addressed, but
* some slices may be larger than others. Expect larger slices to have a more
* even distribution.</li>
* <li>Parameters like <code>requests_per_second</code> and
* <code>max_docs</code> on a request with <code>slices</code> are distributed
* proportionally to each sub-request. Combine that with the earlier point about
* distribution being uneven and you should conclude that using
* <code>max_docs</code> with <code>slices</code> might not result in exactly
* <code>max_docs</code> documents being deleted.</li>
* <li>Each sub-request gets a slightly different snapshot of the source data
* stream or index though these are all taken at approximately the same
* time.</li>
* </ul>
*
*
* If you're slicing manually or otherwise tuning automatic slicing, keep in
* mind that:
*
* <ul>
* <li>Query performance is most efficient when the number of slices is equal to
* the number of shards in the index or backing index. If that number is large
* (for example, 500), choose a lower number as too many <code>slices</code>
* hurts performance. Setting <code>slices</code> higher than the number of
* shards generally does not improve efficiency and adds overhead.</li>
* <li>Delete performance scales linearly across available resources with the
* number of slices.</li>
* </ul>
*
*
* Whether query or delete performance dominates the runtime depends on the
* documents being deleted and cluster resources.
*
* Cancel a delete by query operation
*
* Any delete by query can be canceled using the task cancel API. For example:
*
*
* <pre>
* POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
* </pre>
*
*
*
* The task ID can be found by using the get tasks API.
*
* Cancellation should happen quickly but might take a few seconds. The get task
* status API will continue to list the delete by query task until this task
* checks that it has been cancelled and terminates itself.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteByQueryRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<DeleteByQueryResponse> deleteByQuery(
Function<DeleteByQueryRequest.Builder, ObjectBuilder<DeleteByQueryRequest>> fn) {
return deleteByQuery(fn.apply(new DeleteByQueryRequest.Builder()).build());
}
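// Illustrative usage sketch, not part of the generated client: deleting every
// document matching a query, counting version conflicts instead of aborting,
// and throttling the internal batches. All names here are hypothetical;
// Conflicts is the enum from co.elastic.clients.elasticsearch._types.
//
// CompletableFuture<DeleteByQueryResponse> future = client.deleteByQuery(d -> d
//         .index("my-index-000001")
//         .query(q -> q.match(m -> m.field("status").query("obsolete")))
//         .conflicts(Conflicts.Proceed)  // count conflicts, keep going
//         .requestsPerSecond(500F));     // throttle batches
// future.thenAccept(resp -> System.out.println("Deleted: " + resp.deleted()));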
// ----- Endpoint: delete_by_query_rethrottle
/**
* Throttle a delete by query operation.
*
* Change the number of requests per second for a particular delete by query
* operation. Rethrottling that speeds up the query takes effect immediately but
* rethrottling that slows down the query takes effect after completing the
* current batch to prevent scroll timeouts.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<DeleteByQueryRethrottleResponse> deleteByQueryRethrottle(
DeleteByQueryRethrottleRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteByQueryRethrottleRequest, DeleteByQueryRethrottleResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteByQueryRethrottleRequest, DeleteByQueryRethrottleResponse, ErrorResponse>) DeleteByQueryRethrottleRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Throttle a delete by query operation.
*
* Change the number of requests per second for a particular delete by query
* operation. Rethrottling that speeds up the query takes effect immediately but
* rethrottling that slows down the query takes effect after completing the
* current batch to prevent scroll timeouts.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteByQueryRethrottleRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<DeleteByQueryRethrottleResponse> deleteByQueryRethrottle(
Function<DeleteByQueryRethrottleRequest.Builder, ObjectBuilder<DeleteByQueryRethrottleRequest>> fn) {
return deleteByQueryRethrottle(fn.apply(new DeleteByQueryRethrottleRequest.Builder()).build());
}
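// Illustrative usage sketch, not part of the generated client: slowing down a
// running delete-by-query task. The task ID is the hypothetical example used
// in the Javadoc above; the builder method names are assumptions.
//
// client.deleteByQueryRethrottle(r -> r
//         .taskId("r1A2WoRbTwKZ516z6NEs5A:36619")
//         .requestsPerSecond(50F))
//     .thenRun(() -> System.out.println("Rethrottled"));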
// ----- Endpoint: delete_script
/**
* Delete a script or search template. Deletes a stored script or search
* template.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<DeleteScriptResponse> deleteScript(DeleteScriptRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteScriptRequest, DeleteScriptResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteScriptRequest, DeleteScriptResponse, ErrorResponse>) DeleteScriptRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete a script or search template. Deletes a stored script or search
* template.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteScriptRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<DeleteScriptResponse> deleteScript(
Function<DeleteScriptRequest.Builder, ObjectBuilder<DeleteScriptRequest>> fn) {
return deleteScript(fn.apply(new DeleteScriptRequest.Builder()).build());
}
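// Illustrative usage sketch, not part of the generated client: removing a
// stored script by ID. "my-search-template" is a hypothetical script ID.
//
// client.deleteScript(d -> d.id("my-search-template"))
//     .thenAccept(resp ->
//             System.out.println("Acknowledged: " + resp.acknowledged()));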
// ----- Endpoint: exists
/**
* Check a document.
*
* Verify that a document exists. For example, check to see if a document with
* the <code>_id</code> <code>0</code> exists:
*
*
* <pre>
* HEAD my-index-000001/_doc/0
* </pre>
*
*
*
* If the document exists, the API returns a status code of
* <code>200 - OK</code>. If the document doesn't exist, the API returns
* <code>404 - Not Found</code>.
*
* Versioning support
*
* You can use the <code>version</code> parameter to check the document only if
* its current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<BooleanResponse> exists(ExistsRequest request) {
@SuppressWarnings("unchecked")
Endpoint<ExistsRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsRequest, BooleanResponse, ErrorResponse>) ExistsRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Check a document.
*
* Verify that a document exists. For example, check to see if a document with
* the <code>_id</code> <code>0</code> exists:
*
*
* <pre>
* HEAD my-index-000001/_doc/0
* </pre>
*
*
*
* If the document exists, the API returns a status code of
* <code>200 - OK</code>. If the document doesn't exist, the API returns
* <code>404 - Not Found</code>.
*
* Versioning support
*
* You can use the <code>version</code> parameter to check the document only if
* its current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<BooleanResponse> exists(
Function<ExistsRequest.Builder, ObjectBuilder<ExistsRequest>> fn) {
return exists(fn.apply(new ExistsRequest.Builder()).build());
}
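// Illustrative usage sketch, not part of the generated client: the exists()
// overloads resolve to a BooleanResponse whose value() reflects the 200/404
// outcome of the HEAD request described above.
//
// client.exists(e -> e.index("my-index-000001").id("0"))
//     .thenAccept(resp -> System.out.println("Found: " + resp.value()));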
// ----- Endpoint: exists_source
/**
* Check for a document source.
*
* Check whether a document source exists in an index. For example:
*
*
* <pre>
* HEAD my-index-000001/_source/1
* </pre>
*
*
*
* A document's source is not available if it is disabled in the mapping.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<BooleanResponse> existsSource(ExistsSourceRequest request) {
@SuppressWarnings("unchecked")
Endpoint<ExistsSourceRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsSourceRequest, BooleanResponse, ErrorResponse>) ExistsSourceRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Check for a document source.
*
* Check whether a document source exists in an index. For example:
*
*
* <pre>
* HEAD my-index-000001/_source/1
* </pre>
*
*
*
* A document's source is not available if it is disabled in the mapping.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsSourceRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<BooleanResponse> existsSource(
Function<ExistsSourceRequest.Builder, ObjectBuilder<ExistsSourceRequest>> fn) {
return existsSource(fn.apply(new ExistsSourceRequest.Builder()).build());
}
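// Illustrative usage sketch, not part of the generated client: checking that
// the _source of document 1 is available (it is not if _source is disabled
// in the mapping).
//
// client.existsSource(e -> e.index("my-index-000001").id("1"))
//     .thenAccept(resp -> System.out.println("Has source: " + resp.value()));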
// ----- Endpoint: explain
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<ExplainResponse<TDocument>> explain(ExplainRequest request,
Class<TDocument> tDocumentClass) {
@SuppressWarnings("unchecked")
JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse>) ExplainRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.explain.Response.TDocument", getDeserializer(tDocumentClass));
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExplainRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<ExplainResponse<TDocument>> explain(
Function<ExplainRequest.Builder, ObjectBuilder<ExplainRequest>> fn, Class<TDocument> tDocumentClass) {
return explain(fn.apply(new ExplainRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #explain(ExplainRequest, Class)}, where Class is defined
* as Void, meaning the documents will not be deserialized.
*/
public CompletableFuture<ExplainResponse<Void>> explain(ExplainRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<ExplainRequest, ExplainResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<ExplainRequest, ExplainResponse<Void>, ErrorResponse>) ExplainRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #explain(Function, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public final CompletableFuture<ExplainResponse<Void>> explain(
Function<ExplainRequest.Builder, ObjectBuilder<ExplainRequest>> fn) {
return explain(fn.apply(new ExplainRequest.Builder()).build(), Void.class);
}
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<ExplainResponse<TDocument>> explain(ExplainRequest request,
Type tDocumentType) {
@SuppressWarnings("unchecked")
JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse>) ExplainRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.explain.Response.TDocument", getDeserializer(tDocumentType));
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExplainRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<ExplainResponse<TDocument>> explain(
Function<ExplainRequest.Builder, ObjectBuilder<ExplainRequest>> fn, Type tDocumentType) {
return explain(fn.apply(new ExplainRequest.Builder()).build(), tDocumentType);
}
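// Illustrative usage sketch, not part of the generated client: asking why
// document 0 does or does not match a query. "Product" is a hypothetical POJO
// used to deserialize the returned document.
//
// client.explain(e -> e
//         .index("my-index-000001")
//         .id("0")
//         .query(q -> q.match(m -> m.field("message").query("elasticsearch"))),
//         Product.class)
//     .thenAccept(resp -> System.out.println("Matched: " + resp.matched()));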
// ----- Endpoint: field_caps
/**
* Get the field capabilities.
*
* Get information about the capabilities of fields among multiple indices.
*
* For data streams, the API returns field capabilities among the stream’s
* backing indices. It returns runtime fields like any other field. For example,
* a runtime field with a type of keyword is returned the same as any other
* field that belongs to the <code>keyword</code> family.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<FieldCapsResponse> fieldCaps(FieldCapsRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<FieldCapsRequest, FieldCapsResponse, ErrorResponse> endpoint = (JsonEndpoint<FieldCapsRequest, FieldCapsResponse, ErrorResponse>) FieldCapsRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get the field capabilities.
*
* Get information about the capabilities of fields among multiple indices.
*
* For data streams, the API returns field capabilities among the stream’s
* backing indices. It returns runtime fields like any other field. For example,
* a runtime field with a type of keyword is returned the same as any other
* field that belongs to the <code>keyword</code> family.
*
* @param fn
* a function that initializes a builder to create the
* {@link FieldCapsRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<FieldCapsResponse> fieldCaps(
Function<FieldCapsRequest.Builder, ObjectBuilder<FieldCapsRequest>> fn) {
return fieldCaps(fn.apply(new FieldCapsRequest.Builder()).build());
}
/**
* Get the field capabilities.
*
* Get information about the capabilities of fields among multiple indices.
*
* For data streams, the API returns field capabilities among the stream’s
* backing indices. It returns runtime fields like any other field. For example,
* a runtime field with a type of keyword is returned the same as any other
* field that belongs to the <code>keyword</code> family.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<FieldCapsResponse> fieldCaps() {
return this.transport.performRequestAsync(new FieldCapsRequest.Builder().build(), FieldCapsRequest._ENDPOINT,
this.transportOptions);
}
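// Illustrative usage sketch, not part of the generated client: fetching the
// capabilities of selected fields across indices matching a pattern. The
// index pattern and field names are hypothetical.
//
// client.fieldCaps(f -> f
//         .index("my-index-*")
//         .fields("rating", "title"))
//     .thenAccept(resp -> System.out.println(resp.fields().keySet()));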
// ----- Endpoint: get
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the <code>stored_fields</code> parameter and
* the document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the <code>realtime</code> parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the <code>_source</code> field
* unless you have used the <code>stored_fields</code> parameter or the
* <code>_source</code> field is turned off. You can turn off
* <code>_source</code> retrieval by using the <code>_source</code> parameter:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=false
* </pre>
*
*
*
* If you only need one or two fields from the <code>_source</code>, use the
* <code>_source_includes</code> or <code>_source_excludes</code> parameters to
* include or filter out particular fields. This can be helpful with large
* documents where partial retrieval can save on network overhead. Both
* parameters take a comma-separated list of fields or wildcard expressions. For
* example:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=*.id
* </pre>
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* <pre>
* GET my-index-000001/_doc/2?routing=user1
* </pre>
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the <code>version</code> parameter to retrieve the document only
* if its current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<GetResponse<TDocument>> get(GetRequest request,
Class<TDocument> tDocumentClass) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse>) GetRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentClass));
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the <code>stored_fields</code> parameter and
* the document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the <code>realtime</code> parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the <code>_source</code> field
* unless you have used the <code>stored_fields</code> parameter or the
* <code>_source</code> field is turned off. You can turn off
* <code>_source</code> retrieval by using the <code>_source</code> parameter:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=false
* </pre>
*
*
*
* If you only need one or two fields from the <code>_source</code>, use the
* <code>_source_includes</code> or <code>_source_excludes</code> parameters to
* include or filter out particular fields. This can be helpful with large
* documents where partial retrieval can save on network overhead. Both
* parameters take a comma-separated list of fields or wildcard expressions. For
* example:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=*.id
* </pre>
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* <pre>
* GET my-index-000001/_doc/2?routing=user1
* </pre>
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the <code>version</code> parameter to retrieve the document only
* if its current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<GetResponse<TDocument>> get(
Function<GetRequest.Builder, ObjectBuilder<GetRequest>> fn, Class<TDocument> tDocumentClass) {
return get(fn.apply(new GetRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #get(GetRequest, Class)}, where Class is defined as Void,
* meaning the documents will not be deserialized.
*/
public CompletableFuture<GetResponse<Void>> get(GetRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetRequest, GetResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<GetRequest, GetResponse<Void>, ErrorResponse>) GetRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #get(Function, Class)}, where Class is defined as Void,
* meaning the documents will not be deserialized.
*/
public final CompletableFuture<GetResponse<Void>> get(Function<GetRequest.Builder, ObjectBuilder<GetRequest>> fn) {
return get(fn.apply(new GetRequest.Builder()).build(), Void.class);
}
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the <code>stored_fields</code> parameter and
* the document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the <code>realtime</code> parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the <code>_source</code> field
* unless you have used the <code>stored_fields</code> parameter or the
* <code>_source</code> field is turned off. You can turn off
* <code>_source</code> retrieval by using the <code>_source</code> parameter:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=false
* </pre>
*
*
*
* If you only need one or two fields from the <code>_source</code>, use the
* <code>_source_includes</code> or <code>_source_excludes</code> parameters to
* include or filter out particular fields. This can be helpful with large
* documents where partial retrieval can save on network overhead. Both
* parameters take a comma-separated list of fields or wildcard expressions. For
* example:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=*.id
* </pre>
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* <pre>
* GET my-index-000001/_doc/2?routing=user1
* </pre>
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the <code>version</code> parameter to retrieve the document only
* if its current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<GetResponse<TDocument>> get(GetRequest request, Type tDocumentType) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse>) GetRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentType));
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the <code>stored_fields</code> parameter and
* the document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the <code>realtime</code> parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the <code>_source</code> field
* unless you have used the <code>stored_fields</code> parameter or the
* <code>_source</code> field is turned off. You can turn off
* <code>_source</code> retrieval by using the <code>_source</code> parameter:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=false
* </pre>
*
*
*
* If you only need one or two fields from the <code>_source</code>, use the
* <code>_source_includes</code> or <code>_source_excludes</code> parameters to
* include or filter out particular fields. This can be helpful with large
* documents where partial retrieval can save on network overhead. Both
* parameters take a comma-separated list of fields or wildcard expressions. For
* example:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* <pre>
* GET my-index-000001/_doc/0?_source=*.id
* </pre>
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* <pre>
* GET my-index-000001/_doc/2?routing=user1
* </pre>
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the <code>version</code> parameter to retrieve the document only
* if its current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<GetResponse<TDocument>> get(
Function<GetRequest.Builder, ObjectBuilder<GetRequest>> fn, Type tDocumentType) {
return get(fn.apply(new GetRequest.Builder()).build(), tDocumentType);
}
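// Illustrative usage sketch, not part of the generated client: fetching a
// document and deserializing its _source into a hypothetical "Product" POJO
// via the Class-based overload above.
//
// client.get(g -> g.index("products").id("p-1"), Product.class)
//     .thenAccept(resp -> {
//         if (resp.found()) {
//             Product product = resp.source();
//             System.out.println("Found: " + product);
//         }
//     });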
// ----- Endpoint: get_script
/**
* Get a script or search template. Retrieves a stored script or search
* template.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<GetScriptResponse> getScript(GetScriptRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetScriptRequest, GetScriptResponse, ErrorResponse> endpoint = (JsonEndpoint<GetScriptRequest, GetScriptResponse, ErrorResponse>) GetScriptRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get a script or search template. Retrieves a stored script or search
* template.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetScriptRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<GetScriptResponse> getScript(
Function<GetScriptRequest.Builder, ObjectBuilder<GetScriptRequest>> fn) {
return getScript(fn.apply(new GetScriptRequest.Builder()).build());
}
// ----- Endpoint: get_script_context
/**
* Get script contexts.
*
* Get a list of supported script contexts and their methods.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<GetScriptContextResponse> getScriptContext() {
return this.transport.performRequestAsync(GetScriptContextRequest._INSTANCE, GetScriptContextRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: get_script_languages
/**
* Get script languages.
*
* Get a list of available script types, languages, and contexts.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<GetScriptLanguagesResponse> getScriptLanguages() {
return this.transport.performRequestAsync(GetScriptLanguagesRequest._INSTANCE,
GetScriptLanguagesRequest._ENDPOINT, this.transportOptions);
}
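// Illustrative usage sketch, not part of the generated client: both script
// metadata endpoints take no parameters, so the client exposes them as no-arg
// methods. The response accessor named here is an assumption.
//
// client.getScriptLanguages()
//     .thenAccept(resp -> System.out.println(resp.typesAllowed()));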
// ----- Endpoint: get_source
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* <pre>
* GET my-index-000001/_source/1
* </pre>
*
*
*
* You can use the source filtering parameters to control which parts of the
* <code>_source</code> are returned:
*
*
* <pre>
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<GetSourceResponse<TDocument>> getSource(GetSourceRequest request,
Class<TDocument> tDocumentClass) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse>) GetSourceRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get_source.Response.TDocument",
getDeserializer(tDocumentClass));
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* <pre>
* GET my-index-000001/_source/1
* </pre>
*
*
*
* You can use the source filtering parameters to control which parts of the
* <code>_source</code> are returned:
*
*
* <pre>
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link GetSourceRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<GetSourceResponse<TDocument>> getSource(
Function<GetSourceRequest.Builder, ObjectBuilder<GetSourceRequest>> fn, Class<TDocument> tDocumentClass) {
return getSource(fn.apply(new GetSourceRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #getSource(GetSourceRequest, Class)}, where Class is
* defined as Void, meaning the documents will not be deserialized.
*/
public CompletableFuture<GetSourceResponse<Void>> getSource(GetSourceRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetSourceRequest, GetSourceResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<GetSourceRequest, GetSourceResponse<Void>, ErrorResponse>) GetSourceRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #getSource(Function, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public final CompletableFuture<GetSourceResponse<Void>> getSource(
Function<GetSourceRequest.Builder, ObjectBuilder<GetSourceRequest>> fn) {
return getSource(fn.apply(new GetSourceRequest.Builder()).build(), Void.class);
}
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* <pre>
* GET my-index-000001/_source/1
* </pre>
*
*
*
* You can use the source filtering parameters to control which parts of the
* <code>_source</code> are returned:
*
*
* <pre>
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<GetSourceResponse<TDocument>> getSource(GetSourceRequest request,
Type tDocumentType) {
@SuppressWarnings("unchecked")
JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse>) GetSourceRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get_source.Response.TDocument",
getDeserializer(tDocumentType));
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* <pre>
* GET my-index-000001/_source/1
* </pre>
*
*
*
* You can use the source filtering parameters to control which parts of the
* <code>_source</code> are returned:
*
*
* <pre>
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
* </pre>
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link GetSourceRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<GetSourceResponse<TDocument>> getSource(
Function<GetSourceRequest.Builder, ObjectBuilder<GetSourceRequest>> fn, Type tDocumentType) {
return getSource(fn.apply(new GetSourceRequest.Builder()).build(), tDocumentType);
}
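// Illustrative usage sketch, not part of the generated client: fetching only
// the _source of a document, deserialized into a hypothetical "Product" POJO.
//
// CompletableFuture<GetSourceResponse<Product>> future =
//         client.getSource(g -> g.index("my-index-000001").id("1"), Product.class);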
// ----- Endpoint: health_report
/**
* Get the cluster health. Get a report with the health status of an
* Elasticsearch cluster. The report contains a list of indicators that compose
* Elasticsearch functionality.
*
* Each indicator has a health status of: green, unknown, yellow or red. The
* indicator will provide an explanation and metadata describing the reason for
* its current health status.
*
* The cluster’s status is controlled by the worst indicator status.
*
* In the event that an indicator’s status is non-green, a list of impacts may
* be present in the indicator result which detail the functionalities that are
* negatively affected by the health issue. Each impact carries with it a
* severity level, an area of the system that is affected, and a simple
* description of the impact on the system.
*
* Some health indicators can determine the root cause of a health problem and
* prescribe a set of steps that can be performed in order to improve the health
* of the system. The root cause and remediation steps are encapsulated in a
* diagnosis. A diagnosis contains a cause detailing a root cause analysis, an
* action containing a brief description of the steps to take to fix the
* problem, the list of affected resources (if applicable), and a detailed
* step-by-step troubleshooting guide to fix the diagnosed problem.
*
* NOTE: The health indicators perform root cause analysis of non-green health
* statuses. This can be computationally expensive when called frequently. When
* setting up automated polling of the API for health status, set verbose to
* false to disable the more expensive analysis logic.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<HealthReportResponse> healthReport(HealthReportRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint<HealthReportRequest, HealthReportResponse, ErrorResponse> endpoint = (JsonEndpoint<HealthReportRequest, HealthReportResponse, ErrorResponse>) HealthReportRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get the cluster health. Get a report with the health status of an
* Elasticsearch cluster. The report contains a list of indicators that compose
* Elasticsearch functionality.
*
* Each indicator has a health status of: green, unknown, yellow or red. The
* indicator will provide an explanation and metadata describing the reason for
* its current health status.
*
* The cluster’s status is controlled by the worst indicator status.
*
* In the event that an indicator’s status is non-green, a list of impacts may
* be present in the indicator result which detail the functionalities that are
* negatively affected by the health issue. Each impact carries with it a
* severity level, an area of the system that is affected, and a simple
* description of the impact on the system.
*
* Some health indicators can determine the root cause of a health problem and
* prescribe a set of steps that can be performed in order to improve the health
* of the system. The root cause and remediation steps are encapsulated in a
* diagnosis. A diagnosis contains a cause detailing a root cause analysis, an
* action containing a brief description of the steps to take to fix the
* problem, the list of affected resources (if applicable), and a detailed
* step-by-step troubleshooting guide to fix the diagnosed problem.
*
* NOTE: The health indicators perform root cause analysis of non-green health
* statuses. This can be computationally expensive when called frequently. When
* setting up automated polling of the API for health status, set verbose to
* false to disable the more expensive analysis logic.
*
* @param fn
* a function that initializes a builder to create the
* {@link HealthReportRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture<HealthReportResponse> healthReport(
Function<HealthReportRequest.Builder, ObjectBuilder<HealthReportRequest>> fn) {
return healthReport(fn.apply(new HealthReportRequest.Builder()).build());
}
/**
* Get the cluster health. Get a report with the health status of an
* Elasticsearch cluster. The report contains a list of indicators that compose
* Elasticsearch functionality.
*
* Each indicator has a health status of: green, unknown, yellow or red. The
* indicator will provide an explanation and metadata describing the reason for
* its current health status.
*
* The cluster’s status is controlled by the worst indicator status.
*
* In the event that an indicator’s status is non-green, a list of impacts may
* be present in the indicator result which detail the functionalities that are
* negatively affected by the health issue. Each impact carries with it a
* severity level, an area of the system that is affected, and a simple
* description of the impact on the system.
*
* Some health indicators can determine the root cause of a health problem and
* prescribe a set of steps that can be performed in order to improve the health
* of the system. The root cause and remediation steps are encapsulated in a
* diagnosis. A diagnosis contains a cause detailing a root cause analysis, an
* action containing a brief description of the steps to take to fix the
* problem, the list of affected resources (if applicable), and a detailed
* step-by-step troubleshooting guide to fix the diagnosed problem.
*
* NOTE: The health indicators perform root cause analysis of non-green health
* statuses. This can be computationally expensive when called frequently. When
* setting up automated polling of the API for health status, set verbose to
* false to disable the more expensive analysis logic.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<HealthReportResponse> healthReport() {
return this.transport.performRequestAsync(new HealthReportRequest.Builder().build(),
HealthReportRequest._ENDPOINT, this.transportOptions);
}
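// Illustrative usage sketch, not part of the generated client: polling the
// health report with verbose output disabled, as the Javadoc above recommends
// for automated polling. The status() accessor is an assumption.
//
// client.healthReport(h -> h.verbose(false))
//     .thenAccept(resp -> System.out.println("Cluster status: " + resp.status()));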
// ----- Endpoint: index
/**
* Create or update a document in an index.
*
* Add a JSON document to the specified data stream or index and make it
* searchable. If the target is an index and the document already exists, the
* request updates the document and increments its version.
*
* NOTE: You cannot use this API to send update requests for existing documents
* in a data stream.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
*
* <ul>
* <li>To add or overwrite a document using the
* <code>PUT /&lt;target&gt;/_doc/&lt;_id&gt;</code> request format, you must
* have the <code>create</code>, <code>index</code>, or <code>write</code>
* index privilege.</li>
* <li>To add a document using the <code>POST /&lt;target&gt;/_doc/</code>
* request format, you must have the <code>create_doc</code>,
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To automatically create a data stream or index with this API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>,
* or <code>manage</code> index privilege.</li>
* </ul>
*
*
* Automatic data stream creation requires a matching index template with data
* stream enabled.
*
* NOTE: Replica shards might not all be started when an indexing operation
* returns successfully. By default, only the primary is required. Set
* <code>wait_for_active_shards</code> to change this default behavior.
*
* Automatically create data streams and indices
*
* If the request's target doesn't exist and matches an index template with a
* <code>data_stream</code> definition, the index operation automatically
* creates the data stream.
*
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
*
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
*
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
*
* Automatic index creation is controlled by the
* <code>action.auto_create_index</code> setting. If it is <code>true</code>,
* any index can be created automatically. You can modify this setting to
* explicitly allow or block automatic creation of indices that match specified
* patterns or set it to <code>false</code> to turn off automatic index
* creation entirely. Specify a comma-separated list of patterns you want to
* allow or prefix each pattern with <code>+</code> or <code>-</code> to
* indicate whether
* it should be allowed or blocked. When a list is specified, the default
* behaviour is to disallow.
*
* NOTE: The <code>action.auto_create_index</code> setting affects the automatic
* creation of indices only. It does not affect the creation of data streams.
*
* Optimistic concurrency control
*
* Index operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters. If a mismatch is detected, the operation will result in a
* <code>VersionConflictException</code> and a status code of <code>409</code>.
*
* Routing
*
* By default, shard placement — or routing — is controlled by using a hash of
* the document's ID value. For more explicit control, the value fed into the
* hash function used by the router can be directly specified on a per-operation
* basis using the <code>routing</code> parameter.
*
* When setting up explicit mapping, you can also use the <code>_routing</code>
* field to direct the index operation to extract the routing value from the
* document itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the <code>_routing</code> mapping is defined and
* set to be required, the index operation will fail if no routing value is
* provided or extracted.
*
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
*
* Distributed
*
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
*
* Active shards
*
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say <code>wait_for_active_shards</code> is
* <code>1</code>). This default can be overridden in the index settings
* dynamically by setting <code>index.write.wait_for_active_shards</code>. To
* alter this behavior per operation, use the
* <code>wait_for_active_shards</code> request parameter.
*
* Valid values are <code>all</code> or any positive integer up to the total
* number of configured copies per shard in the index (which is
* <code>number_of_replicas</code> + 1). Specifying a negative value or a
* number greater than the number of shard copies will throw an error.
*
* For example, suppose you have a cluster of three nodes, A, B, and C, and you
* create an index called <code>index</code> with the number of replicas set to
* 3 (resulting in 4 shard copies, one more copy than there are nodes). If you
* attempt an indexing operation, by default the operation will only ensure the
* primary copy of each shard is available before proceeding. This means that
* even if B and C went down and A hosted the primary shard copies, the
* indexing operation would still proceed with only one copy of the data. If
* <code>wait_for_active_shards</code> is set on the request to <code>3</code>
* (and all three nodes are up), the indexing operation will require 3 active
* shard copies before proceeding. This requirement should be met because there
* are 3 active nodes in the cluster, each one holding a copy of the shard.
* However, if you set <code>wait_for_active_shards</code> to <code>all</code>
* (or to <code>4</code>, which is the same in this situation), the indexing
* operation will not proceed as you do not have all 4 copies of each shard
* active in the index. The operation will time out unless a new node is
* brought up in the cluster to host the fourth copy of the shard.
*
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The <code>_shards</code> section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
*
* No operation (noop) updates
*
* When updating a document by using this API, a new version of the document is
* always created even if the document hasn't changed. If this isn't
* acceptable, use the <code>_update</code> API with <code>detect_noop</code>
* set to <code>true</code>. The <code>detect_noop</code> option isn't
* available on this API because it doesn't fetch the old source and isn't
* able to compare it against the new source.
*
* There isn't a definitive rule for when noop updates aren't acceptable. It's a
* combination of lots of factors like how frequently your data source sends
* updates that are actually noops and how many queries per second Elasticsearch
* runs on the shard receiving the updates.
*
* Versioning
*
* Each indexed document is given a version number. By default, internal
* versioning is used that starts at 1 and increments with each update, deletes
* included. Optionally, the version number can be set to an external value (for
* example, if maintained in a database). To enable this functionality,
* version_type should be set to external. The value provided must be a
* numeric, long value greater than or equal to 0, and less than around
* 9.2e+18.
*
* NOTE: Versioning is completely real time, and is not affected by the near
* real time aspects of search operations. If no version is provided, the
* operation runs without any version checks.
*
* When using the external version type, the system checks to see if the version
* number passed to the index request is greater than the version of the
* currently stored document. If true, the document will be indexed and the new
* version number used. If the value provided is less than or equal to the
* stored document's version number, a version conflict will occur and the index
* operation will fail. For example:
*
*
* PUT my-index-000001/_doc/1?version=2&version_type=external
* {
* "user": {
* "id": "elkbee"
* }
* }
*
* In this example, the operation will succeed since the supplied version of 2
* is higher than the current document version of 1. If the document was
* already updated and its version was set to 2 or higher, the indexing command
* will fail and result in a conflict (409 HTTP status code).
*
* A nice side effect is that there is no need to maintain strict ordering of
* async indexing operations run as a result of changes to a source database,
* as long as version numbers from the source database are used. Even the
* simple case of updating the Elasticsearch index using data from a database
* is simplified if external versioning is used, as only the latest version
* will be used if the index operations arrive out of order.
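*
* The request shown above could be expressed with this client roughly as
* follows (the document is given as a plain map so the sketch stays
* self-contained; VersionType comes from the _types package):
*
* <pre>{@code
* client.index(i -> i
*     .index("my-index-000001")
*     .id("1")
*     // index only if 2 is greater than the stored document's version
*     .version(2L)
*     .versionType(VersionType.External)
*     .document(Map.of("user", Map.of("id", "elkbee")))
* );
* }</pre>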
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<IndexResponse> index(IndexRequest<TDocument> request) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<IndexRequest<?>, IndexResponse, ErrorResponse> endpoint = (JsonEndpoint<IndexRequest<?>, IndexResponse, ErrorResponse>) IndexRequest._ENDPOINT;
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Create or update a document in an index.
*
* Add a JSON document to the specified data stream or index and make it
* searchable. If the target is an index and the document already exists, the
* request updates the document and increments its version.
*
* NOTE: You cannot use this API to send update requests for existing documents
* in a data stream.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
*
* - To add or overwrite a document using the PUT /<target>/_doc/<_id> request
* format, you must have the create, index, or write index privilege.
* - To add a document using the POST /<target>/_doc/ request format, you must
* have the create_doc, create, index, or write index privilege.
* - To automatically create a data stream or index with this API request, you
* must have the auto_configure, create_index, or manage index privilege.
*
* Automatic data stream creation requires a matching index template with data
* stream enabled.
*
* NOTE: Replica shards might not all be started when an indexing operation
* returns successfully. By default, only the primary is required. Set
* wait_for_active_shards to change this default behavior.
*
* Automatically create data streams and indices
*
* If the request's target doesn't exist and matches an index template with a
* data_stream definition, the index operation automatically creates the data
* stream.
*
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
*
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
*
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
*
* Automatic index creation is controlled by the action.auto_create_index
* setting. If it is true, any index can be created automatically. You can
* modify this setting to explicitly allow or block automatic creation of
* indices that match specified patterns, or set it to false to turn off
* automatic index creation entirely. Specify a comma-separated list of
* patterns you want to allow, or prefix each pattern with + or - to indicate
* whether it should be allowed or blocked. When a list is specified, the
* default behavior is to disallow.
*
* NOTE: The action.auto_create_index setting affects the automatic creation
* of indices only. It does not affect the creation of data streams.
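*
* The setting can be changed at runtime through the cluster settings API. A
* hedged sketch, assuming the persistent(key, value) adder on the cluster
* put-settings builder and an illustrative pattern list:
*
* <pre>{@code
* client.cluster().putSettings(s -> s
*     // allow my-index-*, block forbidden-*
*     .persistent("action.auto_create_index",
*         JsonData.of("my-index-*,-forbidden-*"))
* );
* }</pre>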
*
* Optimistic concurrency control
*
* Index operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the if_seq_no and if_primary_term parameters. If a
* mismatch is detected, the operation will result in a
* VersionConflictException and a status code of 409.
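*
* A minimal sketch of a conditional write with this client, assuming seqNo
* and primaryTerm were captured from a previous read of the document, and a
* hypothetical product value:
*
* <pre>{@code
* client.index(i -> i
*     .index("products")
*     .id("bk-1")
*     .document(product)
*     // fail with a 409 if the document changed since it was read
*     .ifSeqNo(seqNo)
*     .ifPrimaryTerm(primaryTerm)
* );
* }</pre>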
*
* Routing
*
* By default, shard placement (or routing) is controlled by using a hash of
* the document's ID value. For more explicit control, the value fed into the
* hash function used by the router can be directly specified on a
* per-operation basis using the routing parameter.
*
* When setting up explicit mapping, you can also use the _routing field to
* direct the index operation to extract the routing value from the document
* itself. This does come at the (very minimal) cost of an additional document
* parsing pass. If the _routing mapping is defined and set to be required,
* the index operation will fail if no routing value is provided or extracted.
*
* NOTE: Data streams do not support custom routing unless they were created
* with the allow_custom_routing setting enabled in the template.
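*
* For example, a per-operation routing value can be supplied on the request
* builder (a sketch; the index name, routing key, and product value are
* illustrative):
*
* <pre>{@code
* client.index(i -> i
*     .index("products")
*     .id("bk-1")
*     // co-locate all documents of this user on the same shard
*     .routing("user-123")
*     .document(product)
* );
* }</pre>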
*
* Distributed
*
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
*
* Active shards
*
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say, wait_for_active_shards is 1). This default can
* be overridden in the index settings dynamically by setting
* index.write.wait_for_active_shards. To alter this behavior per operation,
* use the wait_for_active_shards request parameter.
*
* Valid values are all or any positive integer up to the total number of
* configured copies per shard in the index (which is number_of_replicas+1).
* Specifying a negative value or a number greater than the number of shard
* copies will throw an error.
*
* For example, suppose you have a cluster of three nodes, A, B, and C, and you
* create an index named index with the number of replicas set to 3 (resulting
* in 4 shard copies, one more copy than there are nodes). If you attempt an
* indexing operation, by default the operation will only ensure the primary
* copy of each shard is available before proceeding. This means that even if B
* and C went down and A hosted the primary shard copies, the indexing
* operation would still proceed with only one copy of the data. If
* wait_for_active_shards is set on the request to 3 (and all three nodes are
* up), the indexing operation will require 3 active shard copies before
* proceeding. This requirement should be met because there are 3 active nodes
* in the cluster, each one holding a copy of the shard. However, if you set
* wait_for_active_shards to all (or to 4, which is the same in this
* situation), the indexing operation will not proceed, as you do not have all
* 4 copies of each shard active in the index. The operation will time out
* unless a new node is brought up in the cluster to host the fourth copy of
* the shard.
*
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The _shards section of the API response
* reveals the number of shard copies on which replication succeeded and
* failed.
*
* No operation (noop) updates
*
* When updating a document by using this API, a new version of the document is
* always created even if the document hasn't changed. If this isn't
* acceptable, use the _update API with detect_noop set to true. The
* detect_noop option isn't available on this API because it doesn't fetch the
* old source and isn't able to compare it against the new source.
*
* There isn't a definitive rule for when noop updates aren't acceptable. It's a
* combination of lots of factors like how frequently your data source sends
* updates that are actually noops and how many queries per second Elasticsearch
* runs on the shard receiving the updates.
*
* Versioning
*
* Each indexed document is given a version number. By default, internal
* versioning is used that starts at 1 and increments with each update, deletes
* included. Optionally, the version number can be set to an external value (for
* example, if maintained in a database). To enable this functionality,
* version_type should be set to external. The value provided must be a
* numeric, long value greater than or equal to 0, and less than around
* 9.2e+18.
*
* NOTE: Versioning is completely real time, and is not affected by the near
* real time aspects of search operations. If no version is provided, the
* operation runs without any version checks.
*
* When using the external version type, the system checks to see if the version
* number passed to the index request is greater than the version of the
* currently stored document. If true, the document will be indexed and the new
* version number used. If the value provided is less than or equal to the
* stored document's version number, a version conflict will occur and the index
* operation will fail. For example:
*
*
* PUT my-index-000001/_doc/1?version=2&version_type=external
* {
* "user": {
* "id": "elkbee"
* }
* }
*
* In this example, the operation will succeed since the supplied version of 2
* is higher than the current document version of 1. If the document was
* already updated and its version was set to 2 or higher, the indexing command
* will fail and result in a conflict (409 HTTP status code).
*
* A nice side effect is that there is no need to maintain strict ordering of
* async indexing operations run as a result of changes to a source database,
* as long as version numbers from the source database are used. Even the
* simple case of updating the Elasticsearch index using data from a database
* is simplified if external versioning is used, as only the latest version
* will be used if the index operations arrive out of order.
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link IndexRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<IndexResponse> index(
        Function<IndexRequest.Builder<TDocument>, ObjectBuilder<IndexRequest<TDocument>>> fn) {
    return index(fn.apply(new IndexRequest.Builder<TDocument>()).build());
}
// ----- Endpoint: info
/**
* Get cluster info. Get basic build, version, and cluster information.
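*
* A minimal usage sketch, assuming a client variable of this class:
*
* <pre>{@code
* client.info().whenComplete((resp, ex) -> {
*     if (ex == null) {
*         System.out.println("cluster " + resp.clusterName()
*                 + ", version " + resp.version().number());
*     }
* });
* }</pre>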
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture<InfoResponse> info() {
    return this.transport.performRequestAsync(InfoRequest._INSTANCE, InfoRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: mget
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored. You
* can include the stored_fields query parameter in the request URI to specify
* the defaults to use when there are no per-document instructions.
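*
* A minimal usage sketch, assuming a client variable of this class and a
* hypothetical Product document class (MultiGetResponseItem is the union type
* returned for each requested id):
*
* <pre>{@code
* client.mget(m -> m
*     .index("products")
*     .ids("bk-1", "bk-2"),
*     Product.class
* ).thenAccept(resp -> {
*     for (MultiGetResponseItem<Product> item : resp.docs()) {
*         // each item is either a result or a per-document failure
*         if (item.isResult() && item.result().found()) {
*             System.out.println(item.result().source());
*         }
*     }
* });
* }</pre>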
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<MgetResponse<TDocument>> mget(MgetRequest request,
        Class<TDocument> tDocumentClass) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse>) MgetRequest._ENDPOINT;
    endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
            "co.elastic.clients:Deserializer:_global.mget.Response.TDocument", getDeserializer(tDocumentClass));
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored. You
* can include the stored_fields query parameter in the request URI to specify
* the defaults to use when there are no per-document instructions.
*
* @param fn
* a function that initializes a builder to create the
* {@link MgetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<MgetResponse<TDocument>> mget(
        Function<MgetRequest.Builder, ObjectBuilder<MgetRequest>> fn, Class<TDocument> tDocumentClass) {
    return mget(fn.apply(new MgetRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #mget(MgetRequest, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public CompletableFuture<MgetResponse<Void>> mget(MgetRequest request) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MgetRequest, MgetResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<MgetRequest, MgetResponse<Void>, ErrorResponse>) MgetRequest._ENDPOINT;
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #mget(Function, Class)}, where Class is defined as Void,
* meaning the documents will not be deserialized.
*/
public final CompletableFuture<MgetResponse<Void>> mget(
        Function<MgetRequest.Builder, ObjectBuilder<MgetRequest>> fn) {
    return mget(fn.apply(new MgetRequest.Builder()).build(), Void.class);
}
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored. You
* can include the stored_fields query parameter in the request URI to specify
* the defaults to use when there are no per-document instructions.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<MgetResponse<TDocument>> mget(MgetRequest request, Type tDocumentType) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse>) MgetRequest._ENDPOINT;
    endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
            "co.elastic.clients:Deserializer:_global.mget.Response.TDocument", getDeserializer(tDocumentType));
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored. You
* can include the stored_fields query parameter in the request URI to specify
* the defaults to use when there are no per-document instructions.
*
* @param fn
* a function that initializes a builder to create the
* {@link MgetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<MgetResponse<TDocument>> mget(
        Function<MgetRequest.Builder, ObjectBuilder<MgetRequest>> fn, Type tDocumentType) {
    return mget(fn.apply(new MgetRequest.Builder()).build(), tDocumentType);
}
// ----- Endpoint: msearch
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint, the Content-Type header should be set to
* application/x-ndjson.
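*
* With this client the header/body pairs are modeled as request items, so no
* manual NDJSON handling is needed. A minimal sketch, assuming a client
* variable of this class and a hypothetical Product document class:
*
* <pre>{@code
* client.msearch(m -> m
*     // first search: up to 5 products, any content
*     .searches(s -> s
*         .header(h -> h.index("products"))
*         .body(b -> b.query(q -> q.matchAll(ma -> ma)).size(5)))
*     // second search: full-text match on the name field
*     .searches(s -> s
*         .header(h -> h.index("products"))
*         .body(b -> b.query(q -> q.match(t -> t.field("name").query("bike"))))),
*     Product.class
* );
* }</pre>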
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<MsearchResponse<TDocument>> msearch(MsearchRequest request,
        Class<TDocument> tDocumentClass) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse>) MsearchRequest._ENDPOINT;
    endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
            "co.elastic.clients:Deserializer:_global.msearch.Response.TDocument", getDeserializer(tDocumentClass));
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint, the Content-Type header should be set to
* application/x-ndjson.
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<MsearchResponse<TDocument>> msearch(
        Function<MsearchRequest.Builder, ObjectBuilder<MsearchRequest>> fn, Class<TDocument> tDocumentClass) {
    return msearch(fn.apply(new MsearchRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #msearch(MsearchRequest, Class)}, where Class is defined
* as Void, meaning the documents will not be deserialized.
*/
public CompletableFuture<MsearchResponse<Void>> msearch(MsearchRequest request) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MsearchRequest, MsearchResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<MsearchRequest, MsearchResponse<Void>, ErrorResponse>) MsearchRequest._ENDPOINT;
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #msearch(Function, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public final CompletableFuture<MsearchResponse<Void>> msearch(
        Function<MsearchRequest.Builder, ObjectBuilder<MsearchRequest>> fn) {
    return msearch(fn.apply(new MsearchRequest.Builder()).build(), Void.class);
}
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint, the Content-Type header should be set to
* application/x-ndjson.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<MsearchResponse<TDocument>> msearch(MsearchRequest request,
        Type tDocumentType) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse>) MsearchRequest._ENDPOINT;
    endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
            "co.elastic.clients:Deserializer:_global.msearch.Response.TDocument", getDeserializer(tDocumentType));
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint, the Content-Type header should be set to
* application/x-ndjson.
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<MsearchResponse<TDocument>> msearch(
        Function<MsearchRequest.Builder, ObjectBuilder<MsearchRequest>> fn, Type tDocumentType) {
    return msearch(fn.apply(new MsearchRequest.Builder()).build(), tDocumentType);
}
// ----- Endpoint: msearch_template
/**
* Run multiple templated searches.
*
* Run multiple templated searches with a single request. If you are providing
* a text file or text input to curl, use the --data-binary flag instead of -d
* to preserve newlines. For example:
*
*
* $ cat requests
* { "index": "my-index" }
* { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
* { "index": "my-other-index" }
* { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
*
* $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
*
*
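* With this client, the same request can be built without manual NDJSON. A
* hedged sketch, assuming a stored search template named my-search-template
* and a hypothetical Product document class:
*
* <pre>{@code
* client.msearchTemplate(m -> m
*     .searchTemplates(t -> t
*         .header(h -> h.index("my-index"))
*         .body(b -> b
*             .id("my-search-template")
*             .params("query_string", JsonData.of("hello world"))
*             .params("from", JsonData.of(0))
*             .params("size", JsonData.of(10)))),
*     Product.class
* );
* }</pre>
*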
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CompletableFuture<MsearchTemplateResponse<TDocument>> msearchTemplate(
        MsearchTemplateRequest request, Class<TDocument> tDocumentClass) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<TDocument>, ErrorResponse>) MsearchTemplateRequest._ENDPOINT;
    endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
            "co.elastic.clients:Deserializer:_global.msearch_template.Response.TDocument",
            getDeserializer(tDocumentClass));
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Run multiple templated searches.
*
* Run multiple templated searches with a single request. If you are providing
* a text file or text input to curl, use the --data-binary flag instead of -d
* to preserve newlines. For example:
*
*
* $ cat requests
* { "index": "my-index" }
* { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
* { "index": "my-other-index" }
* { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
*
* $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CompletableFuture<MsearchTemplateResponse<TDocument>> msearchTemplate(
        Function<MsearchTemplateRequest.Builder, ObjectBuilder<MsearchTemplateRequest>> fn,
        Class<TDocument> tDocumentClass) {
    return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #msearchTemplate(MsearchTemplateRequest, Class)}, where
* Class is defined as Void, meaning the documents will not be deserialized.
*/
public CompletableFuture<MsearchTemplateResponse<Void>> msearchTemplate(MsearchTemplateRequest request) {
    @SuppressWarnings("unchecked")
    JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<Void>, ErrorResponse>) MsearchTemplateRequest._ENDPOINT;
    return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #msearchTemplate(Function, Class)}, where Class is defined
* as Void, meaning the documents will not be deserialized.
*/
public final CompletableFuture<MsearchTemplateResponse<Void>> msearchTemplate(