
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package co.elastic.clients.elasticsearch;
import co.elastic.clients.ApiClient;
import co.elastic.clients.elasticsearch._types.ElasticsearchException;
import co.elastic.clients.elasticsearch._types.ErrorResponse;
import co.elastic.clients.elasticsearch.async_search.ElasticsearchAsyncSearchClient;
import co.elastic.clients.elasticsearch.autoscaling.ElasticsearchAutoscalingClient;
import co.elastic.clients.elasticsearch.cat.ElasticsearchCatClient;
import co.elastic.clients.elasticsearch.ccr.ElasticsearchCcrClient;
import co.elastic.clients.elasticsearch.cluster.ElasticsearchClusterClient;
import co.elastic.clients.elasticsearch.connector.ElasticsearchConnectorClient;
import co.elastic.clients.elasticsearch.core.BulkRequest;
import co.elastic.clients.elasticsearch.core.BulkResponse;
import co.elastic.clients.elasticsearch.core.ClearScrollRequest;
import co.elastic.clients.elasticsearch.core.ClearScrollResponse;
import co.elastic.clients.elasticsearch.core.ClosePointInTimeRequest;
import co.elastic.clients.elasticsearch.core.ClosePointInTimeResponse;
import co.elastic.clients.elasticsearch.core.CountRequest;
import co.elastic.clients.elasticsearch.core.CountResponse;
import co.elastic.clients.elasticsearch.core.CreateRequest;
import co.elastic.clients.elasticsearch.core.CreateResponse;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRequest;
import co.elastic.clients.elasticsearch.core.DeleteByQueryResponse;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRethrottleRequest;
import co.elastic.clients.elasticsearch.core.DeleteByQueryRethrottleResponse;
import co.elastic.clients.elasticsearch.core.DeleteRequest;
import co.elastic.clients.elasticsearch.core.DeleteResponse;
import co.elastic.clients.elasticsearch.core.DeleteScriptRequest;
import co.elastic.clients.elasticsearch.core.DeleteScriptResponse;
import co.elastic.clients.elasticsearch.core.ExistsRequest;
import co.elastic.clients.elasticsearch.core.ExistsSourceRequest;
import co.elastic.clients.elasticsearch.core.ExplainRequest;
import co.elastic.clients.elasticsearch.core.ExplainResponse;
import co.elastic.clients.elasticsearch.core.FieldCapsRequest;
import co.elastic.clients.elasticsearch.core.FieldCapsResponse;
import co.elastic.clients.elasticsearch.core.GetRequest;
import co.elastic.clients.elasticsearch.core.GetResponse;
import co.elastic.clients.elasticsearch.core.GetScriptContextRequest;
import co.elastic.clients.elasticsearch.core.GetScriptContextResponse;
import co.elastic.clients.elasticsearch.core.GetScriptLanguagesRequest;
import co.elastic.clients.elasticsearch.core.GetScriptLanguagesResponse;
import co.elastic.clients.elasticsearch.core.GetScriptRequest;
import co.elastic.clients.elasticsearch.core.GetScriptResponse;
import co.elastic.clients.elasticsearch.core.GetSourceRequest;
import co.elastic.clients.elasticsearch.core.GetSourceResponse;
import co.elastic.clients.elasticsearch.core.HealthReportRequest;
import co.elastic.clients.elasticsearch.core.HealthReportResponse;
import co.elastic.clients.elasticsearch.core.IndexRequest;
import co.elastic.clients.elasticsearch.core.IndexResponse;
import co.elastic.clients.elasticsearch.core.InfoRequest;
import co.elastic.clients.elasticsearch.core.InfoResponse;
import co.elastic.clients.elasticsearch.core.MgetRequest;
import co.elastic.clients.elasticsearch.core.MgetResponse;
import co.elastic.clients.elasticsearch.core.MsearchRequest;
import co.elastic.clients.elasticsearch.core.MsearchResponse;
import co.elastic.clients.elasticsearch.core.MsearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.MsearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.MtermvectorsRequest;
import co.elastic.clients.elasticsearch.core.MtermvectorsResponse;
import co.elastic.clients.elasticsearch.core.OpenPointInTimeRequest;
import co.elastic.clients.elasticsearch.core.OpenPointInTimeResponse;
import co.elastic.clients.elasticsearch.core.PingRequest;
import co.elastic.clients.elasticsearch.core.PutScriptRequest;
import co.elastic.clients.elasticsearch.core.PutScriptResponse;
import co.elastic.clients.elasticsearch.core.RankEvalRequest;
import co.elastic.clients.elasticsearch.core.RankEvalResponse;
import co.elastic.clients.elasticsearch.core.ReindexRequest;
import co.elastic.clients.elasticsearch.core.ReindexResponse;
import co.elastic.clients.elasticsearch.core.ReindexRethrottleRequest;
import co.elastic.clients.elasticsearch.core.ReindexRethrottleResponse;
import co.elastic.clients.elasticsearch.core.RenderSearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.RenderSearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.ScriptsPainlessExecuteRequest;
import co.elastic.clients.elasticsearch.core.ScriptsPainlessExecuteResponse;
import co.elastic.clients.elasticsearch.core.ScrollRequest;
import co.elastic.clients.elasticsearch.core.ScrollResponse;
import co.elastic.clients.elasticsearch.core.SearchMvtRequest;
import co.elastic.clients.elasticsearch.core.SearchRequest;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import co.elastic.clients.elasticsearch.core.SearchShardsRequest;
import co.elastic.clients.elasticsearch.core.SearchShardsResponse;
import co.elastic.clients.elasticsearch.core.SearchTemplateRequest;
import co.elastic.clients.elasticsearch.core.SearchTemplateResponse;
import co.elastic.clients.elasticsearch.core.TermsEnumRequest;
import co.elastic.clients.elasticsearch.core.TermsEnumResponse;
import co.elastic.clients.elasticsearch.core.TermvectorsRequest;
import co.elastic.clients.elasticsearch.core.TermvectorsResponse;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRequest;
import co.elastic.clients.elasticsearch.core.UpdateByQueryResponse;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRethrottleRequest;
import co.elastic.clients.elasticsearch.core.UpdateByQueryRethrottleResponse;
import co.elastic.clients.elasticsearch.core.UpdateRequest;
import co.elastic.clients.elasticsearch.core.UpdateResponse;
import co.elastic.clients.elasticsearch.dangling_indices.ElasticsearchDanglingIndicesClient;
import co.elastic.clients.elasticsearch.enrich.ElasticsearchEnrichClient;
import co.elastic.clients.elasticsearch.eql.ElasticsearchEqlClient;
import co.elastic.clients.elasticsearch.esql.ElasticsearchEsqlClient;
import co.elastic.clients.elasticsearch.features.ElasticsearchFeaturesClient;
import co.elastic.clients.elasticsearch.fleet.ElasticsearchFleetClient;
import co.elastic.clients.elasticsearch.graph.ElasticsearchGraphClient;
import co.elastic.clients.elasticsearch.ilm.ElasticsearchIlmClient;
import co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesClient;
import co.elastic.clients.elasticsearch.inference.ElasticsearchInferenceClient;
import co.elastic.clients.elasticsearch.ingest.ElasticsearchIngestClient;
import co.elastic.clients.elasticsearch.license.ElasticsearchLicenseClient;
import co.elastic.clients.elasticsearch.logstash.ElasticsearchLogstashClient;
import co.elastic.clients.elasticsearch.migration.ElasticsearchMigrationClient;
import co.elastic.clients.elasticsearch.ml.ElasticsearchMlClient;
import co.elastic.clients.elasticsearch.monitoring.ElasticsearchMonitoringClient;
import co.elastic.clients.elasticsearch.nodes.ElasticsearchNodesClient;
import co.elastic.clients.elasticsearch.query_rules.ElasticsearchQueryRulesClient;
import co.elastic.clients.elasticsearch.rollup.ElasticsearchRollupClient;
import co.elastic.clients.elasticsearch.search_application.ElasticsearchSearchApplicationClient;
import co.elastic.clients.elasticsearch.searchable_snapshots.ElasticsearchSearchableSnapshotsClient;
import co.elastic.clients.elasticsearch.security.ElasticsearchSecurityClient;
import co.elastic.clients.elasticsearch.shutdown.ElasticsearchShutdownClient;
import co.elastic.clients.elasticsearch.simulate.ElasticsearchSimulateClient;
import co.elastic.clients.elasticsearch.slm.ElasticsearchSlmClient;
import co.elastic.clients.elasticsearch.snapshot.ElasticsearchSnapshotClient;
import co.elastic.clients.elasticsearch.sql.ElasticsearchSqlClient;
import co.elastic.clients.elasticsearch.ssl.ElasticsearchSslClient;
import co.elastic.clients.elasticsearch.synonyms.ElasticsearchSynonymsClient;
import co.elastic.clients.elasticsearch.tasks.ElasticsearchTasksClient;
import co.elastic.clients.elasticsearch.text_structure.ElasticsearchTextStructureClient;
import co.elastic.clients.elasticsearch.transform.ElasticsearchTransformClient;
import co.elastic.clients.elasticsearch.watcher.ElasticsearchWatcherClient;
import co.elastic.clients.elasticsearch.xpack.ElasticsearchXpackClient;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.ElasticsearchTransportConfig;
import co.elastic.clients.transport.Endpoint;
import co.elastic.clients.transport.JsonEndpoint;
import co.elastic.clients.transport.Transport;
import co.elastic.clients.transport.TransportOptions;
import co.elastic.clients.transport.endpoints.BinaryResponse;
import co.elastic.clients.transport.endpoints.BooleanResponse;
import co.elastic.clients.transport.endpoints.EndpointWithResponseMapperAttr;
import co.elastic.clients.util.ObjectBuilder;
import java.io.IOException;
import java.lang.reflect.Type;
import java.util.function.Function;
import javax.annotation.Nullable;
//----------------------------------------------------------------
// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------
/**
* Client for the namespace.
*/
public class ElasticsearchClient extends ApiClient {
/**
* Creates a client from an {@link ElasticsearchTransportConfig.Default}
* configuration created with an inline lambda expression.
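* <p>
* A minimal usage sketch (the URL and API key are placeholders, and the
* <code>host</code>/<code>apiKey</code> builder methods are assumed here, not
* taken from this file):
*
* <pre>{@code
* ElasticsearchClient client = ElasticsearchClient.of(b -> b
*     .host("https://localhost:9200") // placeholder server URL
*     .apiKey("<api-key>"));          // placeholder credential
* }</pre>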
*/
public static ElasticsearchClient of(
Function<ElasticsearchTransportConfig.Builder, ObjectBuilder<ElasticsearchTransportConfig.Default>> fn) {
return new ElasticsearchClient(fn.apply(new ElasticsearchTransportConfig.Builder()).build().buildTransport());
}
/**
* Creates a client from an {@link ElasticsearchTransportConfig}.
*/
public ElasticsearchClient(ElasticsearchTransportConfig config) {
this(config.buildTransport());
}
public ElasticsearchClient(ElasticsearchTransport transport) {
super(transport, null);
}
public ElasticsearchClient(ElasticsearchTransport transport, @Nullable TransportOptions transportOptions) {
super(transport, transportOptions);
}
@Override
public ElasticsearchClient withTransportOptions(@Nullable TransportOptions transportOptions) {
return new ElasticsearchClient(this.transport, transportOptions);
}
// ----- Child clients
public ElasticsearchAsyncSearchClient asyncSearch() {
return new ElasticsearchAsyncSearchClient(this.transport, this.transportOptions);
}
public ElasticsearchAutoscalingClient autoscaling() {
return new ElasticsearchAutoscalingClient(this.transport, this.transportOptions);
}
public ElasticsearchCatClient cat() {
return new ElasticsearchCatClient(this.transport, this.transportOptions);
}
public ElasticsearchCcrClient ccr() {
return new ElasticsearchCcrClient(this.transport, this.transportOptions);
}
public ElasticsearchClusterClient cluster() {
return new ElasticsearchClusterClient(this.transport, this.transportOptions);
}
public ElasticsearchConnectorClient connector() {
return new ElasticsearchConnectorClient(this.transport, this.transportOptions);
}
public ElasticsearchDanglingIndicesClient danglingIndices() {
return new ElasticsearchDanglingIndicesClient(this.transport, this.transportOptions);
}
public ElasticsearchEnrichClient enrich() {
return new ElasticsearchEnrichClient(this.transport, this.transportOptions);
}
public ElasticsearchEqlClient eql() {
return new ElasticsearchEqlClient(this.transport, this.transportOptions);
}
public ElasticsearchEsqlClient esql() {
return new ElasticsearchEsqlClient(this.transport, this.transportOptions);
}
public ElasticsearchFeaturesClient features() {
return new ElasticsearchFeaturesClient(this.transport, this.transportOptions);
}
public ElasticsearchFleetClient fleet() {
return new ElasticsearchFleetClient(this.transport, this.transportOptions);
}
public ElasticsearchGraphClient graph() {
return new ElasticsearchGraphClient(this.transport, this.transportOptions);
}
public ElasticsearchIlmClient ilm() {
return new ElasticsearchIlmClient(this.transport, this.transportOptions);
}
public ElasticsearchIndicesClient indices() {
return new ElasticsearchIndicesClient(this.transport, this.transportOptions);
}
public ElasticsearchInferenceClient inference() {
return new ElasticsearchInferenceClient(this.transport, this.transportOptions);
}
public ElasticsearchIngestClient ingest() {
return new ElasticsearchIngestClient(this.transport, this.transportOptions);
}
public ElasticsearchLicenseClient license() {
return new ElasticsearchLicenseClient(this.transport, this.transportOptions);
}
public ElasticsearchLogstashClient logstash() {
return new ElasticsearchLogstashClient(this.transport, this.transportOptions);
}
public ElasticsearchMigrationClient migration() {
return new ElasticsearchMigrationClient(this.transport, this.transportOptions);
}
public ElasticsearchMlClient ml() {
return new ElasticsearchMlClient(this.transport, this.transportOptions);
}
public ElasticsearchMonitoringClient monitoring() {
return new ElasticsearchMonitoringClient(this.transport, this.transportOptions);
}
public ElasticsearchNodesClient nodes() {
return new ElasticsearchNodesClient(this.transport, this.transportOptions);
}
public ElasticsearchQueryRulesClient queryRules() {
return new ElasticsearchQueryRulesClient(this.transport, this.transportOptions);
}
public ElasticsearchRollupClient rollup() {
return new ElasticsearchRollupClient(this.transport, this.transportOptions);
}
public ElasticsearchSearchApplicationClient searchApplication() {
return new ElasticsearchSearchApplicationClient(this.transport, this.transportOptions);
}
public ElasticsearchSearchableSnapshotsClient searchableSnapshots() {
return new ElasticsearchSearchableSnapshotsClient(this.transport, this.transportOptions);
}
public ElasticsearchSecurityClient security() {
return new ElasticsearchSecurityClient(this.transport, this.transportOptions);
}
public ElasticsearchShutdownClient shutdown() {
return new ElasticsearchShutdownClient(this.transport, this.transportOptions);
}
public ElasticsearchSimulateClient simulate() {
return new ElasticsearchSimulateClient(this.transport, this.transportOptions);
}
public ElasticsearchSlmClient slm() {
return new ElasticsearchSlmClient(this.transport, this.transportOptions);
}
public ElasticsearchSnapshotClient snapshot() {
return new ElasticsearchSnapshotClient(this.transport, this.transportOptions);
}
public ElasticsearchSqlClient sql() {
return new ElasticsearchSqlClient(this.transport, this.transportOptions);
}
public ElasticsearchSslClient ssl() {
return new ElasticsearchSslClient(this.transport, this.transportOptions);
}
public ElasticsearchSynonymsClient synonyms() {
return new ElasticsearchSynonymsClient(this.transport, this.transportOptions);
}
public ElasticsearchTasksClient tasks() {
return new ElasticsearchTasksClient(this.transport, this.transportOptions);
}
public ElasticsearchTextStructureClient textStructure() {
return new ElasticsearchTextStructureClient(this.transport, this.transportOptions);
}
public ElasticsearchTransformClient transform() {
return new ElasticsearchTransformClient(this.transport, this.transportOptions);
}
public ElasticsearchWatcherClient watcher() {
return new ElasticsearchWatcherClient(this.transport, this.transportOptions);
}
public ElasticsearchXpackClient xpack() {
return new ElasticsearchXpackClient(this.transport, this.transportOptions);
}
// ----- Endpoint: bulk
/**
* Bulk index or delete documents. Perform multiple <code>index</code>,
* <code>create</code>, <code>delete</code>, and <code>update</code> actions in
* a single request. This reduces overhead and can greatly increase indexing
* speed.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To use the <code>create</code> action, you must have the
* <code>create_doc</code>, <code>create</code>, <code>index</code>, or
* <code>write</code> index privilege. Data streams support only the
* <code>create</code> action.</li>
* <li>To use the <code>index</code> action, you must have the
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To use the <code>delete</code> action, you must have the
* <code>delete</code> or <code>write</code> index privilege.</li>
* <li>To use the <code>update</code> action, you must have the
* <code>index</code> or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with a bulk API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* <li>To make the result of a bulk operation visible to search using the
* <code>refresh</code> parameter, you must have the <code>maintenance</code> or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* The actions are specified in the request body using a newline delimited JSON
* (NDJSON) structure:
*
* <pre>
* action_and_meta_data\n
* optional_source\n
* action_and_meta_data\n
* optional_source\n
* ....
* action_and_meta_data\n
* optional_source\n
* </pre>
* <p>
* The <code>index</code> and <code>create</code> actions expect a source on the
* next line and have the same semantics as the <code>op_type</code> parameter
* in the standard index API. A <code>create</code> action fails if a document
* with the same ID already exists in the target; an <code>index</code> action
* adds or replaces a document as necessary.
* <p>
* NOTE: Data streams support only the <code>create</code> action. To update or
* delete a document in a data stream, you must target the backing index
* containing the document.
* <p>
* An <code>update</code> action expects that the partial doc, upsert, and
* script and its options are specified on the next line.
* <p>
* A <code>delete</code> action does not expect a source on the next line and
* has the same semantics as the standard delete API.
* <p>
* NOTE: The final line of data must end with a newline character
* (<code>\n</code>). Each newline character may be preceded by a carriage
* return (<code>\r</code>). When sending NDJSON data to the <code>_bulk</code>
* endpoint, use a <code>Content-Type</code> header of
* <code>application/json</code> or <code>application/x-ndjson</code>. Because
* this format uses literal newline characters (<code>\n</code>) as delimiters,
* make sure that the JSON actions and sources are not pretty printed.
* <p>
* If you provide a target in the request path, it is used for any actions that
* don't explicitly specify an <code>_index</code> argument.
* <p>
* A note on the format: the idea here is to make processing as fast as
* possible. As some of the actions are redirected to other shards on other
* nodes, only <code>action_meta_data</code> is parsed on the receiving node
* side.
* <p>
* Client libraries using this protocol should strive to do something similar
* on the client side, and reduce buffering as much as possible.
* <p>
* There is no "correct" number of actions to perform in a single bulk
* request. Experiment with different settings to find the optimal size for your
* particular workload. Note that Elasticsearch limits the maximum size of an
* HTTP request to 100mb by default, so clients must ensure that no request
* exceeds this size. It is not possible to index a single document that exceeds
* the size limit, so you must pre-process any such documents into smaller
* pieces before sending them to Elasticsearch. For instance, split documents
* into pages or chapters before indexing them, or store raw binary data in a
* system outside Elasticsearch and replace the raw data with a link to the
* external system in the documents that you send to Elasticsearch.
* <p>
* <strong>Client support for bulk requests</strong>
* <p>
* Some of the officially supported clients provide helpers to assist with bulk
* requests and reindexing:
* <ul>
* <li>Go: Check out <code>esutil.BulkIndexer</code></li>
* <li>Perl: Check out <code>Search::Elasticsearch::Client::5_0::Bulk</code> and
* <code>Search::Elasticsearch::Client::5_0::Scroll</code></li>
* <li>Python: Check out <code>elasticsearch.helpers.*</code></li>
* <li>JavaScript: Check out <code>client.helpers.*</code></li>
* <li>.NET: Check out <code>BulkAllObservable</code></li>
* <li>PHP: Check out bulk indexing.</li>
* </ul>
* <p>
* <strong>Submitting bulk requests with cURL</strong>
* <p>
* If you're providing text file input to <code>curl</code>, you must use the
* <code>--data-binary</code> flag instead of plain <code>-d</code>. The latter
* doesn't preserve newlines. For example:
*
* <pre>
* $ cat requests
* { "index" : { "_index" : "test", "_id" : "1" } }
* { "field1" : "value1" }
* $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
* {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
* </pre>
* <p>
* <strong>Optimistic concurrency control</strong>
* <p>
* Each <code>index</code> and <code>delete</code> action within a bulk API call
* may include the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters in their respective action and meta data lines. The
* <code>if_seq_no</code> and <code>if_primary_term</code> parameters control
* how operations are run, based on the last modification to existing documents.
* See Optimistic concurrency control for more details.
* <p>
* <strong>Versioning</strong>
* <p>
* Each bulk item can include the version value using the <code>version</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_version</code> mapping. It also supports the
* <code>version_type</code>.
* <p>
* <strong>Routing</strong>
* <p>
* Each bulk item can include the routing value using the <code>routing</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_routing</code> mapping.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <p>
* <strong>Wait for active shards</strong>
* <p>
* When making bulk calls, you can set the <code>wait_for_active_shards</code>
* parameter to require a minimum number of shard copies to be active before
* starting to process the bulk request.
* <p>
* <strong>Refresh</strong>
* <p>
* Control when the changes made by this request are visible to search.
* <p>
* NOTE: Only the shards that receive the bulk request will be affected by
* refresh. Imagine a <code>_bulk?refresh=wait_for</code> request with three
* documents in it that happen to be routed to different shards in an index with
* five shards. The request will only wait for those three shards to refresh.
* The other two shards that make up the index do not participate in the
* <code>_bulk</code> request at all.
* <p>
* You might want to disable the refresh interval temporarily to improve
* indexing throughput for large bulk requests. Refer to the linked
* documentation for step-by-step instructions using the index settings API.
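* <p>
* A minimal usage sketch with this client (the index name and the
* <code>java.util.Map</code> payload are illustrative, not part of this API):
*
* <pre>{@code
* BulkRequest request = BulkRequest.of(b -> b
*     .operations(op -> op
*         .index(idx -> idx
*             .index("my-index") // hypothetical index name
*             .id("1")
*             .document(Map.of("field1", "value1")))));
* BulkResponse response = client.bulk(request);
* if (response.errors()) {
*     // inspect response.items() for the per-action failures
* }
* }</pre>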
*
* @see Documentation
* on elastic.co
*/
public BulkResponse bulk(BulkRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<BulkRequest, BulkResponse, ErrorResponse> endpoint = (JsonEndpoint<BulkRequest, BulkResponse, ErrorResponse>) BulkRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Bulk index or delete documents. Perform multiple <code>index</code>,
* <code>create</code>, <code>delete</code>, and <code>update</code> actions in
* a single request. This reduces overhead and can greatly increase indexing
* speed.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To use the <code>create</code> action, you must have the
* <code>create_doc</code>, <code>create</code>, <code>index</code>, or
* <code>write</code> index privilege. Data streams support only the
* <code>create</code> action.</li>
* <li>To use the <code>index</code> action, you must have the
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To use the <code>delete</code> action, you must have the
* <code>delete</code> or <code>write</code> index privilege.</li>
* <li>To use the <code>update</code> action, you must have the
* <code>index</code> or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with a bulk API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* <li>To make the result of a bulk operation visible to search using the
* <code>refresh</code> parameter, you must have the <code>maintenance</code> or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* The actions are specified in the request body using a newline delimited JSON
* (NDJSON) structure:
*
* <pre>
* action_and_meta_data\n
* optional_source\n
* action_and_meta_data\n
* optional_source\n
* ....
* action_and_meta_data\n
* optional_source\n
* </pre>
* <p>
* The <code>index</code> and <code>create</code> actions expect a source on the
* next line and have the same semantics as the <code>op_type</code> parameter
* in the standard index API. A <code>create</code> action fails if a document
* with the same ID already exists in the target; an <code>index</code> action
* adds or replaces a document as necessary.
* <p>
* NOTE: Data streams support only the <code>create</code> action. To update or
* delete a document in a data stream, you must target the backing index
* containing the document.
* <p>
* An <code>update</code> action expects that the partial doc, upsert, and
* script and its options are specified on the next line.
* <p>
* A <code>delete</code> action does not expect a source on the next line and
* has the same semantics as the standard delete API.
* <p>
* NOTE: The final line of data must end with a newline character
* (<code>\n</code>). Each newline character may be preceded by a carriage
* return (<code>\r</code>). When sending NDJSON data to the <code>_bulk</code>
* endpoint, use a <code>Content-Type</code> header of
* <code>application/json</code> or <code>application/x-ndjson</code>. Because
* this format uses literal newline characters (<code>\n</code>) as delimiters,
* make sure that the JSON actions and sources are not pretty printed.
* <p>
* If you provide a target in the request path, it is used for any actions that
* don't explicitly specify an <code>_index</code> argument.
* <p>
* A note on the format: the idea here is to make processing as fast as
* possible. As some of the actions are redirected to other shards on other
* nodes, only <code>action_meta_data</code> is parsed on the receiving node
* side.
* <p>
* Client libraries using this protocol should strive to do something similar
* on the client side, and reduce buffering as much as possible.
* <p>
* There is no "correct" number of actions to perform in a single bulk
* request. Experiment with different settings to find the optimal size for your
* particular workload. Note that Elasticsearch limits the maximum size of an
* HTTP request to 100mb by default, so clients must ensure that no request
* exceeds this size. It is not possible to index a single document that exceeds
* the size limit, so you must pre-process any such documents into smaller
* pieces before sending them to Elasticsearch. For instance, split documents
* into pages or chapters before indexing them, or store raw binary data in a
* system outside Elasticsearch and replace the raw data with a link to the
* external system in the documents that you send to Elasticsearch.
* <p>
* <strong>Client support for bulk requests</strong>
* <p>
* Some of the officially supported clients provide helpers to assist with bulk
* requests and reindexing:
* <ul>
* <li>Go: Check out <code>esutil.BulkIndexer</code></li>
* <li>Perl: Check out <code>Search::Elasticsearch::Client::5_0::Bulk</code> and
* <code>Search::Elasticsearch::Client::5_0::Scroll</code></li>
* <li>Python: Check out <code>elasticsearch.helpers.*</code></li>
* <li>JavaScript: Check out <code>client.helpers.*</code></li>
* <li>.NET: Check out <code>BulkAllObservable</code></li>
* <li>PHP: Check out bulk indexing.</li>
* </ul>
* <p>
* <strong>Submitting bulk requests with cURL</strong>
* <p>
* If you're providing text file input to <code>curl</code>, you must use the
* <code>--data-binary</code> flag instead of plain <code>-d</code>. The latter
* doesn't preserve newlines. For example:
*
* <pre>
* $ cat requests
* { "index" : { "_index" : "test", "_id" : "1" } }
* { "field1" : "value1" }
* $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
* {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
* </pre>
* <p>
* <strong>Optimistic concurrency control</strong>
* <p>
* Each <code>index</code> and <code>delete</code> action within a bulk API call
* may include the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters in their respective action and meta data lines. The
* <code>if_seq_no</code> and <code>if_primary_term</code> parameters control
* how operations are run, based on the last modification to existing documents.
* See Optimistic concurrency control for more details.
* <p>
* <strong>Versioning</strong>
* <p>
* Each bulk item can include the version value using the <code>version</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_version</code> mapping. It also supports the
* <code>version_type</code>.
* <p>
* <strong>Routing</strong>
* <p>
* Each bulk item can include the routing value using the <code>routing</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_routing</code> mapping.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <p>
* <strong>Wait for active shards</strong>
* <p>
* When making bulk calls, you can set the <code>wait_for_active_shards</code>
* parameter to require a minimum number of shard copies to be active before
* starting to process the bulk request.
* <p>
* <strong>Refresh</strong>
* <p>
* Control when the changes made by this request are visible to search.
* <p>
* NOTE: Only the shards that receive the bulk request will be affected by
* refresh. Imagine a <code>_bulk?refresh=wait_for</code> request with three
* documents in it that happen to be routed to different shards in an index with
* five shards. The request will only wait for those three shards to refresh.
* The other two shards that make up the index do not participate in the
* <code>_bulk</code> request at all.
* <p>
* You might want to disable the refresh interval temporarily to improve
* indexing throughput for large bulk requests. Refer to the linked
* documentation for step-by-step instructions using the index settings API.
* @param fn
* a function that initializes a builder to create the
* {@link BulkRequest}
* @see Documentation
* on elastic.co
*/
public final BulkResponse bulk(Function<BulkRequest.Builder, ObjectBuilder<BulkRequest>> fn)
throws IOException, ElasticsearchException {
return bulk(fn.apply(new BulkRequest.Builder()).build());
}
/**
* Bulk index or delete documents. Perform multiple <code>index</code>,
* <code>create</code>, <code>delete</code>, and <code>update</code> actions in
* a single request. This reduces overhead and can greatly increase indexing
* speed.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To use the <code>create</code> action, you must have the
* <code>create_doc</code>, <code>create</code>, <code>index</code>, or
* <code>write</code> index privilege. Data streams support only the
* <code>create</code> action.</li>
* <li>To use the <code>index</code> action, you must have the
* <code>create</code>, <code>index</code>, or <code>write</code> index
* privilege.</li>
* <li>To use the <code>delete</code> action, you must have the
* <code>delete</code> or <code>write</code> index privilege.</li>
* <li>To use the <code>update</code> action, you must have the
* <code>index</code> or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with a bulk API request,
* you must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* <li>To make the result of a bulk operation visible to search using the
* <code>refresh</code> parameter, you must have the <code>maintenance</code> or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* The actions are specified in the request body using a newline delimited JSON
* (NDJSON) structure:
*
* <pre>
* action_and_meta_data\n
* optional_source\n
* action_and_meta_data\n
* optional_source\n
* ....
* action_and_meta_data\n
* optional_source\n
* </pre>
* <p>
* The <code>index</code> and <code>create</code> actions expect a source on the
* next line and have the same semantics as the <code>op_type</code> parameter
* in the standard index API. A <code>create</code> action fails if a document
* with the same ID already exists in the target; an <code>index</code> action
* adds or replaces a document as necessary.
* <p>
* NOTE: Data streams support only the <code>create</code> action. To update or
* delete a document in a data stream, you must target the backing index
* containing the document.
* <p>
* An <code>update</code> action expects that the partial doc, upsert, and
* script and its options are specified on the next line.
* <p>
* A <code>delete</code> action does not expect a source on the next line and
* has the same semantics as the standard delete API.
* <p>
* NOTE: The final line of data must end with a newline character
* (<code>\n</code>). Each newline character may be preceded by a carriage
* return (<code>\r</code>). When sending NDJSON data to the <code>_bulk</code>
* endpoint, use a <code>Content-Type</code> header of
* <code>application/json</code> or <code>application/x-ndjson</code>. Because
* this format uses literal newline characters (<code>\n</code>) as delimiters,
* make sure that the JSON actions and sources are not pretty printed.
* <p>
* If you provide a target in the request path, it is used for any actions that
* don't explicitly specify an <code>_index</code> argument.
* <p>
* A note on the format: the idea here is to make processing as fast as
* possible. As some of the actions are redirected to other shards on other
* nodes, only <code>action_meta_data</code> is parsed on the receiving node
* side.
* <p>
* Client libraries using this protocol should strive to do something similar
* on the client side, and reduce buffering as much as possible.
* <p>
* There is no "correct" number of actions to perform in a single bulk
* request. Experiment with different settings to find the optimal size for your
* particular workload. Note that Elasticsearch limits the maximum size of an
* HTTP request to 100mb by default, so clients must ensure that no request
* exceeds this size. It is not possible to index a single document that exceeds
* the size limit, so you must pre-process any such documents into smaller
* pieces before sending them to Elasticsearch. For instance, split documents
* into pages or chapters before indexing them, or store raw binary data in a
* system outside Elasticsearch and replace the raw data with a link to the
* external system in the documents that you send to Elasticsearch.
* <p>
* <strong>Client support for bulk requests</strong>
* <p>
* Some of the officially supported clients provide helpers to assist with bulk
* requests and reindexing:
* <ul>
* <li>Go: Check out <code>esutil.BulkIndexer</code></li>
* <li>Perl: Check out <code>Search::Elasticsearch::Client::5_0::Bulk</code> and
* <code>Search::Elasticsearch::Client::5_0::Scroll</code></li>
* <li>Python: Check out <code>elasticsearch.helpers.*</code></li>
* <li>JavaScript: Check out <code>client.helpers.*</code></li>
* <li>.NET: Check out <code>BulkAllObservable</code></li>
* <li>PHP: Check out bulk indexing.</li>
* </ul>
* <p>
* <strong>Submitting bulk requests with cURL</strong>
* <p>
* If you're providing text file input to <code>curl</code>, you must use the
* <code>--data-binary</code> flag instead of plain <code>-d</code>. The latter
* doesn't preserve newlines. For example:
*
* <pre>
* $ cat requests
* { "index" : { "_index" : "test", "_id" : "1" } }
* { "field1" : "value1" }
* $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
* {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
* </pre>
* <p>
* <strong>Optimistic concurrency control</strong>
* <p>
* Each <code>index</code> and <code>delete</code> action within a bulk API call
* may include the <code>if_seq_no</code> and <code>if_primary_term</code>
* parameters in their respective action and meta data lines. The
* <code>if_seq_no</code> and <code>if_primary_term</code> parameters control
* how operations are run, based on the last modification to existing documents.
* See Optimistic concurrency control for more details.
* <p>
* <strong>Versioning</strong>
* <p>
* Each bulk item can include the version value using the <code>version</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_version</code> mapping. It also supports the
* <code>version_type</code>.
* <p>
* <strong>Routing</strong>
* <p>
* Each bulk item can include the routing value using the <code>routing</code>
* field. It automatically follows the behavior of the index or delete operation
* based on the <code>_routing</code> mapping.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <p>
* <strong>Wait for active shards</strong>
* <p>
* When making bulk calls, you can set the <code>wait_for_active_shards</code>
* parameter to require a minimum number of shard copies to be active before
* starting to process the bulk request.
* <p>
* <strong>Refresh</strong>
* <p>
* Control when the changes made by this request are visible to search.
* <p>
* NOTE: Only the shards that receive the bulk request will be affected by
* refresh. Imagine a <code>_bulk?refresh=wait_for</code> request with three
* documents in it that happen to be routed to different shards in an index with
* five shards. The request will only wait for those three shards to refresh.
* The other two shards that make up the index do not participate in the
* <code>_bulk</code> request at all.
* <p>
* You might want to disable the refresh interval temporarily to improve
* indexing throughput for large bulk requests. Refer to the linked
* documentation for step-by-step instructions using the index settings API.
*
* @see Documentation
* on elastic.co
*/
public BulkResponse bulk() throws IOException, ElasticsearchException {
return this.transport.performRequest(new BulkRequest.Builder().build(), BulkRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: clear_scroll
/**
* Clear a scrolling search. Clear the search context and results for a
* scrolling search.
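* <p>
* A minimal sketch (the scroll ID is a placeholder returned by a prior
* scrolling search):
*
* <pre>{@code
* ClearScrollResponse response = client.clearScroll(c -> c
*     .scrollId("<scroll-id>")); // placeholder scroll ID
* }</pre>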
*
* @see Documentation
* on elastic.co
*/
public ClearScrollResponse clearScroll(ClearScrollRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ClearScrollRequest, ClearScrollResponse, ErrorResponse> endpoint = (JsonEndpoint<ClearScrollRequest, ClearScrollResponse, ErrorResponse>) ClearScrollRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Clear a scrolling search. Clear the search context and results for a
* scrolling search.
*
* @param fn
* a function that initializes a builder to create the
* {@link ClearScrollRequest}
* @see Documentation
* on elastic.co
*/
public final ClearScrollResponse clearScroll(
Function<ClearScrollRequest.Builder, ObjectBuilder<ClearScrollRequest>> fn)
throws IOException, ElasticsearchException {
return clearScroll(fn.apply(new ClearScrollRequest.Builder()).build());
}
/**
* Clear a scrolling search. Clear the search context and results for a
* scrolling search.
*
* @see Documentation
* on elastic.co
*/
public ClearScrollResponse clearScroll() throws IOException, ElasticsearchException {
return this.transport.performRequest(new ClearScrollRequest.Builder().build(), ClearScrollRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: close_point_in_time
/**
* Close a point in time. A point in time must be opened explicitly before being
* used in search requests. The <code>keep_alive</code> parameter tells
* Elasticsearch how long it should persist. A point in time is automatically
* closed when the <code>keep_alive</code> period has elapsed. However, keeping
* points in time has a cost; close them as soon as they are no longer required
* for search requests.
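* <p>
* A minimal sketch (the PIT ID is a placeholder returned by the open point in
* time API):
*
* <pre>{@code
* ClosePointInTimeResponse response = client.closePointInTime(c -> c
*     .id("<pit-id>")); // placeholder point-in-time ID
* }</pre>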
*
* @see Documentation
* on elastic.co
*/
public ClosePointInTimeResponse closePointInTime(ClosePointInTimeRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ClosePointInTimeRequest, ClosePointInTimeResponse, ErrorResponse> endpoint = (JsonEndpoint<ClosePointInTimeRequest, ClosePointInTimeResponse, ErrorResponse>) ClosePointInTimeRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Close a point in time. A point in time must be opened explicitly before being
* used in search requests. The <code>keep_alive</code> parameter tells
* Elasticsearch how long it should persist. A point in time is automatically
* closed when the <code>keep_alive</code> period has elapsed. However, keeping
* points in time has a cost; close them as soon as they are no longer required
* for search requests.
*
* @param fn
* a function that initializes a builder to create the
* {@link ClosePointInTimeRequest}
* @see Documentation
* on elastic.co
*/
public final ClosePointInTimeResponse closePointInTime(
Function<ClosePointInTimeRequest.Builder, ObjectBuilder<ClosePointInTimeRequest>> fn)
throws IOException, ElasticsearchException {
return closePointInTime(fn.apply(new ClosePointInTimeRequest.Builder()).build());
}
// ----- Endpoint: count
/**
* Count search results. Get the number of documents matching a query.
*
* The query can be provided either by using a simple query string as a
* parameter, or by defining Query DSL within the request body. The query is
* optional. When no query is provided, the API uses <code>match_all</code> to
* count all the documents.
*
* The count API supports multi-target syntax. You can run a single count API
* search across multiple data streams and indices.
*
* The operation is broadcast across all shards. For each shard ID group, a
* replica is chosen and the search is run against it. This means that replicas
* increase the scalability of the count.
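* <p>
* A minimal sketch (the index name, field, and value are illustrative):
*
* <pre>{@code
* CountResponse response = client.count(c -> c
*     .index("my-index") // hypothetical index name
*     .query(q -> q.match(m -> m
*         .field("field1")
*         .query("value1"))));
* long hits = response.count();
* }</pre>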
*
* @see Documentation
* on elastic.co
*/
public CountResponse count(CountRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CountRequest, CountResponse, ErrorResponse> endpoint = (JsonEndpoint<CountRequest, CountResponse, ErrorResponse>) CountRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Count search results. Get the number of documents matching a query.
*
* The query can be provided either by using a simple query string as a
* parameter, or by defining Query DSL within the request body. The query is
* optional. When no query is provided, the API uses <code>match_all</code> to
* count all the documents.
*
* The count API supports multi-target syntax. You can run a single count API
* search across multiple data streams and indices.
*
* The operation is broadcast across all shards. For each shard ID group, a
* replica is chosen and the search is run against it. This means that replicas
* increase the scalability of the count.
*
* @param fn
* a function that initializes a builder to create the
* {@link CountRequest}
* @see Documentation
* on elastic.co
*/
public final CountResponse count(Function<CountRequest.Builder, ObjectBuilder<CountRequest>> fn)
throws IOException, ElasticsearchException {
return count(fn.apply(new CountRequest.Builder()).build());
}
/**
* Count search results. Get the number of documents matching a query.
*
* The query can be provided either by using a simple query string as a
* parameter, or by defining Query DSL within the request body. The query is
* optional. When no query is provided, the API uses <code>match_all</code> to
* count all the documents.
*
* The count API supports multi-target syntax. You can run a single count API
* search across multiple data streams and indices.
*
* The operation is broadcast across all shards. For each shard ID group, a
* replica is chosen and the search is run against it. This means that replicas
* increase the scalability of the count.
*
* @see Documentation
* on elastic.co
*/
public CountResponse count() throws IOException, ElasticsearchException {
return this.transport.performRequest(new CountRequest.Builder().build(), CountRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: create
/**
* Create a new document in the index.
* <p>
* You can index a new JSON document with the <code>/&lt;target&gt;/_doc/</code>
* or <code>/&lt;target&gt;/_create/&lt;_id&gt;</code> APIs. Using
* <code>_create</code> guarantees that the document is indexed only if it does
* not already exist. It returns a 409 response when a document with the same ID
* already exists in the index. To update an existing document, you must use the
* <code>/&lt;target&gt;/_doc/</code> API.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To add a document using the
* <code>PUT /&lt;target&gt;/_create/&lt;_id&gt;</code> or
* <code>POST /&lt;target&gt;/_create/&lt;_id&gt;</code> request formats, you
* must have the <code>create_doc</code>, <code>create</code>,
* <code>index</code>, or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with this API request, you
* must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* <strong>Automatically create data streams and indices</strong>
* <p>
* If the request's target doesn't exist and matches an index template with a
* <code>data_stream</code> definition, the index operation automatically
* creates the data stream.
* <p>
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
* <p>
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
* <p>
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
* <p>
* Automatic index creation is controlled by the
* <code>action.auto_create_index</code> setting. If it is <code>true</code>,
* any index can be created automatically. You can modify this setting to
* explicitly allow or block automatic creation of indices that match specified
* patterns or set it to <code>false</code> to turn off automatic index creation
* entirely. Specify a comma-separated list of patterns you want to allow, or
* prefix each pattern with <code>+</code> or <code>-</code> to indicate whether
* it should be allowed or blocked. When a list is specified, the default
* behaviour is to disallow.
* <p>
* NOTE: The <code>action.auto_create_index</code> setting affects the automatic
* creation of indices only. It does not affect the creation of data streams.
* <p>
* <strong>Routing</strong>
* <p>
* By default, shard placement (or routing) is controlled by using a hash of
* the document's ID value. For more explicit control, the value fed into the
* hash function used by the router can be directly specified on a per-operation
* basis using the <code>routing</code> parameter.
* <p>
* When setting up explicit mapping, you can also use the <code>_routing</code>
* field to direct the index operation to extract the routing value from the
* document itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the <code>_routing</code> mapping is defined and
* set to be required, the index operation will fail if no routing value is
* provided or extracted.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <p>
* <strong>Distributed</strong>
* <p>
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
* <p>
* <strong>Active shards</strong>
* <p>
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say <code>wait_for_active_shards</code> is
* <code>1</code>). This default can be overridden in the index settings
* dynamically by setting <code>index.write.wait_for_active_shards</code>. To
* alter this behavior per operation, use the
* <code>wait_for_active_shards</code> request parameter.
* <p>
* Valid values are <code>all</code> or any positive integer up to the total
* number of configured copies per shard in the index (which is
* <code>number_of_replicas</code>+1). Specifying a negative value or a number
* greater than the number of shard copies will throw an error.
* <p>
* For example, suppose you have a cluster of three nodes, A, B, and C, and you
* create an index with the number of replicas set to 3 (resulting in 4 shard
* copies, one more copy than there are nodes). If you attempt an indexing
* operation, by default the operation will only ensure the primary copy of each
* shard is available before proceeding. This means that even if B and C went
* down and A hosted the primary shard copies, the indexing operation would
* still proceed with only one copy of the data. If
* <code>wait_for_active_shards</code> is set on the request to <code>3</code>
* (and all three nodes are up), the indexing operation will require 3 active
* shard copies before proceeding. This requirement should be met because there
* are 3 active nodes in the cluster, each one holding a copy of the shard.
* However, if you set <code>wait_for_active_shards</code> to <code>all</code>
* (or to <code>4</code>, which is the same in this situation), the indexing
* operation will not proceed as you do not have all 4 copies of each shard
* active in the index. The operation will time out unless a new node is brought
* up in the cluster to host the fourth copy of the shard.
* <p>
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The <code>_shards</code> section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
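* <p>
* A minimal sketch (the index name, ID, and document object are illustrative):
*
* <pre>{@code
* CreateResponse response = client.create(c -> c
*     .index("my-index")      // hypothetical index name
*     .id("1")
*     .document(myDocument)); // any JSON-serializable application object
* }</pre>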
*
* @see Documentation
* on elastic.co
*/
public <TDocument> CreateResponse create(CreateRequest<TDocument> request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CreateRequest<?>, CreateResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateRequest<?>, CreateResponse, ErrorResponse>) CreateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create a new document in the index.
* <p>
* You can index a new JSON document with the <code>/&lt;target&gt;/_doc/</code>
* or <code>/&lt;target&gt;/_create/&lt;_id&gt;</code> APIs. Using
* <code>_create</code> guarantees that the document is indexed only if it does
* not already exist. It returns a 409 response when a document with the same ID
* already exists in the index. To update an existing document, you must use the
* <code>/&lt;target&gt;/_doc/</code> API.
* <p>
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
* <ul>
* <li>To add a document using the
* <code>PUT /&lt;target&gt;/_create/&lt;_id&gt;</code> or
* <code>POST /&lt;target&gt;/_create/&lt;_id&gt;</code> request formats, you
* must have the <code>create_doc</code>, <code>create</code>,
* <code>index</code>, or <code>write</code> index privilege.</li>
* <li>To automatically create a data stream or index with this API request, you
* must have the <code>auto_configure</code>, <code>create_index</code>, or
* <code>manage</code> index privilege.</li>
* </ul>
* <p>
* Automatic data stream creation requires a matching index template with data
* stream enabled.
* <p>
* <strong>Automatically create data streams and indices</strong>
* <p>
* If the request's target doesn't exist and matches an index template with a
* <code>data_stream</code> definition, the index operation automatically
* creates the data stream.
* <p>
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
* <p>
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
* <p>
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
* <p>
* Automatic index creation is controlled by the
* <code>action.auto_create_index</code> setting. If it is <code>true</code>,
* any index can be created automatically. You can modify this setting to
* explicitly allow or block automatic creation of indices that match specified
* patterns or set it to <code>false</code> to turn off automatic index creation
* entirely. Specify a comma-separated list of patterns you want to allow, or
* prefix each pattern with <code>+</code> or <code>-</code> to indicate whether
* it should be allowed or blocked. When a list is specified, the default
* behaviour is to disallow.
* <p>
* NOTE: The <code>action.auto_create_index</code> setting affects the automatic
* creation of indices only. It does not affect the creation of data streams.
* <p>
* <strong>Routing</strong>
* <p>
* By default, shard placement (or routing) is controlled by using a hash of
* the document's ID value. For more explicit control, the value fed into the
* hash function used by the router can be directly specified on a per-operation
* basis using the <code>routing</code> parameter.
* <p>
* When setting up explicit mapping, you can also use the <code>_routing</code>
* field to direct the index operation to extract the routing value from the
* document itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the <code>_routing</code> mapping is defined and
* set to be required, the index operation will fail if no routing value is
* provided or extracted.
* <p>
* NOTE: Data streams do not support custom routing unless they were created
* with the <code>allow_custom_routing</code> setting enabled in the template.
* <p>
* <strong>Distributed</strong>
* <p>
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
* <p>
* <strong>Active shards</strong>
* <p>
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say <code>wait_for_active_shards</code> is
* <code>1</code>). This default can be overridden in the index settings
* dynamically by setting <code>index.write.wait_for_active_shards</code>. To
* alter this behavior per operation, use the
* <code>wait_for_active_shards</code> request parameter.
* <p>
* Valid values are <code>all</code> or any positive integer up to the total
* number of configured copies per shard in the index (which is
* <code>number_of_replicas</code>+1). Specifying a negative value or a number
* greater than the number of shard copies will throw an error.
* <p>
* For example, suppose you have a cluster of three nodes, A, B, and C, and you
* create an index with the number of replicas set to 3 (resulting in 4 shard
* copies, one more copy than there are nodes). If you attempt an indexing
* operation, by default the operation will only ensure the primary copy of each
* shard is available before proceeding. This means that even if B and C went
* down and A hosted the primary shard copies, the indexing operation would
* still proceed with only one copy of the data. If
* <code>wait_for_active_shards</code> is set on the request to <code>3</code>
* (and all three nodes are up), the indexing operation will require 3 active
* shard copies before proceeding. This requirement should be met because there
* are 3 active nodes in the cluster, each one holding a copy of the shard.
* However, if you set <code>wait_for_active_shards</code> to <code>all</code>
* (or to <code>4</code>, which is the same in this situation), the indexing
* operation will not proceed as you do not have all 4 copies of each shard
* active in the index. The operation will time out unless a new node is brought
* up in the cluster to host the fourth copy of the shard.
* <p>
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The <code>_shards</code> section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> CreateResponse create(
Function<CreateRequest.Builder<TDocument>, ObjectBuilder<CreateRequest<TDocument>>> fn)
throws IOException, ElasticsearchException {
return create(fn.apply(new CreateRequest.Builder()).build());
}
// ----- Endpoint: delete
/**
* Delete a document.
*
* Remove a JSON document from the specified index.
*
* NOTE: You cannot send deletion requests directly to a data stream. To delete
* a document in a data stream, you must target the backing index containing the
* document.
*
* Optimistic concurrency control
*
* Delete operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the if_seq_no and if_primary_term parameters. If a
* mismatch is detected, the operation will result in a
* VersionConflictException and a status code of 409.
*
* Versioning
*
* Each document indexed is versioned. When deleting a document, the version
* can be specified to make sure the relevant document you are trying to delete
* is actually being deleted and it has not changed in the meantime. Every
* write operation run on a document, deletes included, causes its version to
* be incremented. The version number of a deleted document remains available
* for a short time after deletion to allow for control of concurrent
* operations. The length of time for which a deleted document's version
* remains available is determined by the index.gc_deletes index setting.
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to delete a document.
*
* If the _routing mapping is set to required and no routing value is
* specified, the delete API throws a RoutingMissingException and rejects the
* request.
*
* For example:
*
*
* DELETE /my-index-000001/_doc/1?routing=shard-1
*
*
*
* This request deletes the document with ID 1, but it is routed based on the
* user. The document is not deleted if the correct routing is not specified.
*
* Distributed
*
* The delete operation gets hashed into a specific shard ID. It then gets
* redirected into the primary shard within that ID group and replicated (if
* needed) to shard replicas within that ID group.
*
* @see Documentation
* on elastic.co
*/
public DeleteResponse delete(DeleteRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<DeleteRequest, DeleteResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteRequest, DeleteResponse, ErrorResponse>) DeleteRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete a document.
*
* Remove a JSON document from the specified index.
*
* NOTE: You cannot send deletion requests directly to a data stream. To delete
* a document in a data stream, you must target the backing index containing the
* document.
*
* Optimistic concurrency control
*
* Delete operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the if_seq_no and if_primary_term parameters. If a
* mismatch is detected, the operation will result in a
* VersionConflictException and a status code of 409.
*
* Versioning
*
* Each document indexed is versioned. When deleting a document, the version
* can be specified to make sure the relevant document you are trying to delete
* is actually being deleted and it has not changed in the meantime. Every
* write operation run on a document, deletes included, causes its version to
* be incremented. The version number of a deleted document remains available
* for a short time after deletion to allow for control of concurrent
* operations. The length of time for which a deleted document's version
* remains available is determined by the index.gc_deletes index setting.
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to delete a document.
*
* If the _routing mapping is set to required and no routing value is
* specified, the delete API throws a RoutingMissingException and rejects the
* request.
*
* For example:
*
*
* DELETE /my-index-000001/_doc/1?routing=shard-1
*
*
*
* This request deletes the document with ID 1, but it is routed based on the
* user. The document is not deleted if the correct routing is not specified.
*
* Distributed
*
* The delete operation gets hashed into a specific shard ID. It then gets
* redirected into the primary shard within that ID group and replicated (if
* needed) to shard replicas within that ID group.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteResponse delete(Function<DeleteRequest.Builder, ObjectBuilder<DeleteRequest>> fn)
throws IOException, ElasticsearchException {
return delete(fn.apply(new DeleteRequest.Builder()).build());
}
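// Illustrative usage sketch (names and values are examples only): the builder
// lambda mirrors the DELETE /my-index-000001/_doc/1?routing=shard-1 example
// above, with optimistic concurrency control added via ifSeqNo/ifPrimaryTerm:
//
//   DeleteResponse resp = esClient.delete(d -> d
//       .index("my-index-000001")
//       .id("1")
//       .routing("shard-1")
//       .ifSeqNo(10L)         // hypothetical values; a mismatch yields
//       .ifPrimaryTerm(1L));  // a version conflict (HTTP 409)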
// ----- Endpoint: delete_by_query
/**
* Delete documents.
*
* Deletes documents that match the specified query.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or alias:
*
* - read
* - delete or write
*
* You can specify the query criteria in the request URI or the request body
* using the same syntax as the search API. When you submit a delete by query
* request, Elasticsearch gets a snapshot of the data stream or index when it
* begins processing the request and deletes matching documents using internal
* versioning. If a document changes between the time that the snapshot is taken
* and the delete operation is processed, it results in a version conflict and
* the delete operation fails.
*
* NOTE: Documents with a version equal to 0 cannot be deleted using delete by
* query because internal versioning does not support 0 as a valid version
* number.
*
* While processing a delete by query request, Elasticsearch performs multiple
* search requests sequentially to find all of the matching documents to delete.
* A bulk delete request is performed for each batch of matching documents. If a
* search or bulk request is rejected, the requests are retried up to 10 times,
* with exponential backoff. If the maximum retry limit is reached, processing
* halts and all failed requests are returned in the response. Any delete
* requests that completed successfully still stick; they are not rolled back.
*
* You can opt to count version conflicts instead of halting and returning by
* setting conflicts to proceed. Note that if you opt to count version
* conflicts, the operation could attempt to delete more documents from the
* source than max_docs until it has successfully deleted max_docs documents,
* or it has gone through every document in the source query.
*
* Throttling delete requests
*
* To control the rate at which delete by query issues batches of delete
* operations, you can set requests_per_second to any positive decimal number.
* This pads each batch with a wait time to throttle the rate. Set
* requests_per_second to -1 to disable throttling.
*
* Throttling uses a wait time between batches so that the internal scroll
* requests can be given a timeout that takes the request padding into account.
* The padding time is the difference between the batch size divided by the
* requests_per_second and the time spent writing. By default the batch size is
* 1000, so if requests_per_second is set to 500:
*
*
* target_time = 1000 / 500 per second = 2 seconds
* wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
*
*
*
* Since the batch is issued as a single _bulk request, large batch sizes cause
* Elasticsearch to create many requests and wait before starting the next set.
* This is "bursty" instead of "smooth".
*
* Slicing
*
* Delete by query supports sliced scroll to parallelize the delete process.
* This can improve efficiency and provide a convenient way to break the request
* down into smaller parts.
*
* Setting slices to auto lets Elasticsearch choose the number of slices to
* use. This setting will use one slice per shard, up to a certain limit. If
* there are multiple source data streams or indices, it will choose the number
* of slices based on the index or backing index with the smallest number of
* shards. Adding slices to the delete by query operation creates sub-requests,
* which means it has some quirks:
*
* - You can see these requests in the tasks APIs. These sub-requests are
* "child" tasks of the task for the request with slices.
* - Fetching the status of the task for the request with slices only contains
* the status of completed slices.
* - These sub-requests are individually addressable for things like
* cancellation and rethrottling.
* - Rethrottling the request with slices will rethrottle the unfinished
* sub-requests proportionally.
* - Canceling the request with slices will cancel each sub-request.
* - Due to the nature of slices, each sub-request won't get a perfectly even
* portion of the documents. All documents will be addressed, but some slices
* may be larger than others. Expect larger slices to have a more even
* distribution.
* - Parameters like requests_per_second and max_docs on a request with slices
* are distributed proportionally to each sub-request. Combine that with the
* earlier point about distribution being uneven and you should conclude that
* using max_docs with slices might not result in exactly max_docs documents
* being deleted.
* - Each sub-request gets a slightly different snapshot of the source data
* stream or index, though these are all taken at approximately the same time.
*
* If you're slicing manually or otherwise tuning automatic slicing, keep in
* mind that:
*
* - Query performance is most efficient when the number of slices is equal to
* the number of shards in the index or backing index. If that number is large
* (for example, 500), choose a lower number, as too many slices hurts
* performance. Setting slices higher than the number of shards generally does
* not improve efficiency and adds overhead.
* - Delete performance scales linearly across available resources with the
* number of slices.
*
* Whether query or delete performance dominates the runtime depends on the
* documents being deleted and cluster resources.
*
* Cancel a delete by query operation
*
* Any delete by query can be canceled using the task cancel API. For example:
*
*
* POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
*
*
*
* The task ID can be found by using the get tasks API.
*
* Cancellation should happen quickly but might take a few seconds. The get task
* status API will continue to list the delete by query task until this task
* checks that it has been cancelled and terminates itself.
*
* @see Documentation
* on elastic.co
*/
public DeleteByQueryResponse deleteByQuery(DeleteByQueryRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<DeleteByQueryRequest, DeleteByQueryResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteByQueryRequest, DeleteByQueryResponse, ErrorResponse>) DeleteByQueryRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete documents.
*
* Deletes documents that match the specified query.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or alias:
*
* - read
* - delete or write
*
* You can specify the query criteria in the request URI or the request body
* using the same syntax as the search API. When you submit a delete by query
* request, Elasticsearch gets a snapshot of the data stream or index when it
* begins processing the request and deletes matching documents using internal
* versioning. If a document changes between the time that the snapshot is taken
* and the delete operation is processed, it results in a version conflict and
* the delete operation fails.
*
* NOTE: Documents with a version equal to 0 cannot be deleted using delete by
* query because internal versioning does not support 0 as a valid version
* number.
*
* While processing a delete by query request, Elasticsearch performs multiple
* search requests sequentially to find all of the matching documents to delete.
* A bulk delete request is performed for each batch of matching documents. If a
* search or bulk request is rejected, the requests are retried up to 10 times,
* with exponential backoff. If the maximum retry limit is reached, processing
* halts and all failed requests are returned in the response. Any delete
* requests that completed successfully still stick; they are not rolled back.
*
* You can opt to count version conflicts instead of halting and returning by
* setting conflicts to proceed. Note that if you opt to count version
* conflicts, the operation could attempt to delete more documents from the
* source than max_docs until it has successfully deleted max_docs documents,
* or it has gone through every document in the source query.
*
* Throttling delete requests
*
* To control the rate at which delete by query issues batches of delete
* operations, you can set requests_per_second to any positive decimal number.
* This pads each batch with a wait time to throttle the rate. Set
* requests_per_second to -1 to disable throttling.
*
* Throttling uses a wait time between batches so that the internal scroll
* requests can be given a timeout that takes the request padding into account.
* The padding time is the difference between the batch size divided by the
* requests_per_second and the time spent writing. By default the batch size is
* 1000, so if requests_per_second is set to 500:
*
*
* target_time = 1000 / 500 per second = 2 seconds
* wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
*
*
*
* Since the batch is issued as a single _bulk request, large batch sizes cause
* Elasticsearch to create many requests and wait before starting the next set.
* This is "bursty" instead of "smooth".
*
* Slicing
*
* Delete by query supports sliced scroll to parallelize the delete process.
* This can improve efficiency and provide a convenient way to break the request
* down into smaller parts.
*
* Setting slices to auto lets Elasticsearch choose the number of slices to
* use. This setting will use one slice per shard, up to a certain limit. If
* there are multiple source data streams or indices, it will choose the number
* of slices based on the index or backing index with the smallest number of
* shards. Adding slices to the delete by query operation creates sub-requests,
* which means it has some quirks:
*
* - You can see these requests in the tasks APIs. These sub-requests are
* "child" tasks of the task for the request with slices.
* - Fetching the status of the task for the request with slices only contains
* the status of completed slices.
* - These sub-requests are individually addressable for things like
* cancellation and rethrottling.
* - Rethrottling the request with slices will rethrottle the unfinished
* sub-requests proportionally.
* - Canceling the request with slices will cancel each sub-request.
* - Due to the nature of slices, each sub-request won't get a perfectly even
* portion of the documents. All documents will be addressed, but some slices
* may be larger than others. Expect larger slices to have a more even
* distribution.
* - Parameters like requests_per_second and max_docs on a request with slices
* are distributed proportionally to each sub-request. Combine that with the
* earlier point about distribution being uneven and you should conclude that
* using max_docs with slices might not result in exactly max_docs documents
* being deleted.
* - Each sub-request gets a slightly different snapshot of the source data
* stream or index, though these are all taken at approximately the same time.
*
* If you're slicing manually or otherwise tuning automatic slicing, keep in
* mind that:
*
* - Query performance is most efficient when the number of slices is equal to
* the number of shards in the index or backing index. If that number is large
* (for example, 500), choose a lower number, as too many slices hurts
* performance. Setting slices higher than the number of shards generally does
* not improve efficiency and adds overhead.
* - Delete performance scales linearly across available resources with the
* number of slices.
*
* Whether query or delete performance dominates the runtime depends on the
* documents being deleted and cluster resources.
*
* Cancel a delete by query operation
*
* Any delete by query can be canceled using the task cancel API. For example:
*
*
* POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
*
*
*
* The task ID can be found by using the get tasks API.
*
* Cancellation should happen quickly but might take a few seconds. The get task
* status API will continue to list the delete by query task until this task
* checks that it has been cancelled and terminates itself.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteByQueryRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteByQueryResponse deleteByQuery(
        Function<DeleteByQueryRequest.Builder, ObjectBuilder<DeleteByQueryRequest>> fn)
throws IOException, ElasticsearchException {
return deleteByQuery(fn.apply(new DeleteByQueryRequest.Builder()).build());
}
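// Illustrative usage sketch (index and field names are examples only):
// delete every document matching a query, counting version conflicts
// instead of failing, as described above:
//
//   DeleteByQueryResponse resp = esClient.deleteByQuery(d -> d
//       .index("my-index-000001")
//       .conflicts(Conflicts.Proceed) // co.elastic.clients.elasticsearch._types.Conflicts
//       .query(q -> q.match(m -> m.field("user.id").query("elkbee"))));
//   Long deleted = resp.deleted();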
// ----- Endpoint: delete_by_query_rethrottle
/**
* Throttle a delete by query operation.
*
* Change the number of requests per second for a particular delete by query
* operation. Rethrottling that speeds up the query takes effect immediately,
* but rethrottling that slows down the query takes effect after completing the
* current batch to prevent scroll timeouts.
*
* @see Documentation
* on elastic.co
*/
public DeleteByQueryRethrottleResponse deleteByQueryRethrottle(DeleteByQueryRethrottleRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<DeleteByQueryRethrottleRequest, DeleteByQueryRethrottleResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteByQueryRethrottleRequest, DeleteByQueryRethrottleResponse, ErrorResponse>) DeleteByQueryRethrottleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Throttle a delete by query operation.
*
* Change the number of requests per second for a particular delete by query
* operation. Rethrottling that speeds up the query takes effect immediately,
* but rethrottling that slows down the query takes effect after completing the
* current batch to prevent scroll timeouts.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteByQueryRethrottleRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteByQueryRethrottleResponse deleteByQueryRethrottle(
        Function<DeleteByQueryRethrottleRequest.Builder, ObjectBuilder<DeleteByQueryRethrottleRequest>> fn)
throws IOException, ElasticsearchException {
return deleteByQueryRethrottle(fn.apply(new DeleteByQueryRethrottleRequest.Builder()).build());
}
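// Illustrative usage sketch: the task ID below is the example from the
// documentation above, and the new rate is an arbitrary value (the
// requestsPerSecond parameter type may vary by client version):
//
//   esClient.deleteByQueryRethrottle(r -> r
//       .taskId("r1A2WoRbTwKZ516z6NEs5A:36619")
//       .requestsPerSecond(500.0F));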
// ----- Endpoint: delete_script
/**
* Delete a script or search template. Deletes a stored script or search
* template.
*
* @see Documentation
* on elastic.co
*/
public DeleteScriptResponse deleteScript(DeleteScriptRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<DeleteScriptRequest, DeleteScriptResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteScriptRequest, DeleteScriptResponse, ErrorResponse>) DeleteScriptRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete a script or search template. Deletes a stored script or search
* template.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteScriptRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteScriptResponse deleteScript(
        Function<DeleteScriptRequest.Builder, ObjectBuilder<DeleteScriptRequest>> fn)
throws IOException, ElasticsearchException {
return deleteScript(fn.apply(new DeleteScriptRequest.Builder()).build());
}
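// Illustrative usage sketch ("my-search-template" is an example ID):
//
//   DeleteScriptResponse resp = esClient.deleteScript(s -> s.id("my-search-template"));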
// ----- Endpoint: exists
/**
* Check a document.
*
* Verify that a document exists. For example, check to see if a document with
* the _id 0 exists:
*
*
* HEAD my-index-000001/_doc/0
*
*
*
* If the document exists, the API returns a status code of 200 - OK. If the
* document doesn't exist, the API returns 404 - Not Found.
*
* Versioning support
*
* You can use the version parameter to check the document only if its current
* version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse exists(ExistsRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    Endpoint<ExistsRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsRequest, BooleanResponse, ErrorResponse>) ExistsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Check a document.
*
* Verify that a document exists. For example, check to see if a document with
* the _id 0 exists:
*
*
* HEAD my-index-000001/_doc/0
*
*
*
* If the document exists, the API returns a status code of 200 - OK. If the
* document doesn't exist, the API returns 404 - Not Found.
*
* Versioning support
*
* You can use the version parameter to check the document only if its current
* version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsRequest}
* @see Documentation
* on elastic.co
*/
public final BooleanResponse exists(Function<ExistsRequest.Builder, ObjectBuilder<ExistsRequest>> fn)
throws IOException, ElasticsearchException {
return exists(fn.apply(new ExistsRequest.Builder()).build());
}
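// Illustrative usage sketch, equivalent to the HEAD my-index-000001/_doc/0
// example above; the BooleanResponse maps 200 - OK to true and
// 404 - Not Found to false:
//
//   boolean found = esClient.exists(e -> e
//       .index("my-index-000001")
//       .id("0")).value();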
// ----- Endpoint: exists_source
/**
* Check for a document source.
*
* Check whether a document source exists in an index. For example:
*
*
* HEAD my-index-000001/_source/1
*
*
*
* A document's source is not available if it is disabled in the mapping.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse existsSource(ExistsSourceRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    Endpoint<ExistsSourceRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsSourceRequest, BooleanResponse, ErrorResponse>) ExistsSourceRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Check for a document source.
*
* Check whether a document source exists in an index. For example:
*
*
* HEAD my-index-000001/_source/1
*
*
*
* A document's source is not available if it is disabled in the mapping.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsSourceRequest}
* @see Documentation
* on elastic.co
*/
public final BooleanResponse existsSource(
        Function<ExistsSourceRequest.Builder, ObjectBuilder<ExistsSourceRequest>> fn)
throws IOException, ElasticsearchException {
return existsSource(fn.apply(new ExistsSourceRequest.Builder()).build());
}
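// Illustrative usage sketch, equivalent to HEAD my-index-000001/_source/1:
//
//   boolean hasSource = esClient.existsSource(e -> e
//       .index("my-index-000001")
//       .id("1")).value();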
// ----- Endpoint: explain
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> ExplainResponse<TDocument> explain(ExplainRequest request, Class<TDocument> tDocumentClass)
        throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse>) ExplainRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.explain.Response.TDocument", getDeserializer(tDocumentClass));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExplainRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> ExplainResponse<TDocument> explain(
        Function<ExplainRequest.Builder, ObjectBuilder<ExplainRequest>> fn, Class<TDocument> tDocumentClass)
throws IOException, ElasticsearchException {
return explain(fn.apply(new ExplainRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #explain(ExplainRequest, Class)}, where Class is defined
* as Void, meaning the documents will not be deserialized.
*/
public ExplainResponse<Void> explain(ExplainRequest request) throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<ExplainRequest, ExplainResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<ExplainRequest, ExplainResponse<Void>, ErrorResponse>) ExplainRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #explain(Function, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public final ExplainResponse<Void> explain(Function<ExplainRequest.Builder, ObjectBuilder<ExplainRequest>> fn)
throws IOException, ElasticsearchException {
return explain(fn.apply(new ExplainRequest.Builder()).build(), Void.class);
}
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> ExplainResponse<TDocument> explain(ExplainRequest request, Type tDocumentType)
        throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<ExplainRequest, ExplainResponse<TDocument>, ErrorResponse>) ExplainRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.explain.Response.TDocument", getDeserializer(tDocumentType));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Explain a document match result. Get information about why a specific
* document matches, or doesn't match, a query. It computes a score explanation
* for a query and a specific document.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExplainRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> ExplainResponse<TDocument> explain(
        Function<ExplainRequest.Builder, ObjectBuilder<ExplainRequest>> fn, Type tDocumentType)
throws IOException, ElasticsearchException {
return explain(fn.apply(new ExplainRequest.Builder()).build(), tDocumentType);
}
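// Illustrative usage sketch (index, ID, field, and the "Product" document
// class are examples only): explain why document 0 does or does not match
// a match query:
//
//   ExplainResponse<Product> resp = esClient.explain(e -> e
//       .index("my-index-000001")
//       .id("0")
//       .query(q -> q.match(m -> m.field("message").query("elasticsearch"))),
//       Product.class);
//   boolean matched = resp.matched();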
// ----- Endpoint: field_caps
/**
* Get the field capabilities.
*
* Get information about the capabilities of fields among multiple indices.
*
* For data streams, the API returns field capabilities among the stream's
* backing indices. It returns runtime fields like any other field. For
* example, a runtime field with a type of keyword is returned the same as any
* other field that belongs to the keyword family.
*
* @see Documentation
* on elastic.co
*/
public FieldCapsResponse fieldCaps(FieldCapsRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<FieldCapsRequest, FieldCapsResponse, ErrorResponse> endpoint = (JsonEndpoint<FieldCapsRequest, FieldCapsResponse, ErrorResponse>) FieldCapsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get the field capabilities.
*
* Get information about the capabilities of fields among multiple indices.
*
* For data streams, the API returns field capabilities among the stream's
* backing indices. It returns runtime fields like any other field. For
* example, a runtime field with a type of keyword is returned the same as any
* other field that belongs to the keyword family.
*
* @param fn
* a function that initializes a builder to create the
* {@link FieldCapsRequest}
* @see Documentation
* on elastic.co
*/
public final FieldCapsResponse fieldCaps(Function<FieldCapsRequest.Builder, ObjectBuilder<FieldCapsRequest>> fn)
throws IOException, ElasticsearchException {
return fieldCaps(fn.apply(new FieldCapsRequest.Builder()).build());
}
/**
* Get the field capabilities.
*
* Get information about the capabilities of fields among multiple indices.
*
* For data streams, the API returns field capabilities among the stream's
* backing indices. It returns runtime fields like any other field. For
* example, a runtime field with a type of keyword is returned the same as any
* other field that belongs to the keyword family.
*
* @see Documentation
* on elastic.co
*/
public FieldCapsResponse fieldCaps() throws IOException, ElasticsearchException {
return this.transport.performRequest(new FieldCapsRequest.Builder().build(), FieldCapsRequest._ENDPOINT,
this.transportOptions);
}
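// Illustrative usage sketch (index and field names are examples only):
//
//   FieldCapsResponse resp = esClient.fieldCaps(f -> f
//       .index("my-index-000001")
//       .fields("rating", "title"));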
// ----- Endpoint: get
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the stored_fields parameter and the
* document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the realtime parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the _source field unless you
* have used the stored_fields parameter or the _source field is turned off.
* You can turn off _source retrieval by using the _source parameter:
*
*
* GET my-index-000001/_doc/0?_source=false
*
*
*
* If you only need one or two fields from the _source, use the
* _source_includes or _source_excludes parameters to include or filter out
* particular fields. This can be helpful with large documents where partial
* retrieval can save on network overhead. Both parameters take a
* comma-separated list of fields or wildcard expressions. For example:
*
*
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* GET my-index-000001/_doc/0?_source=*.id
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* GET my-index-000001/_doc/2?routing=user1
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the version parameter to retrieve the document only if its
* current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> GetResponse<TDocument> get(GetRequest request, Class<TDocument> tDocumentClass)
        throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse>) GetRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentClass));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the stored_fields parameter and the
* document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the realtime parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the _source field unless you
* have used the stored_fields parameter or the _source field is turned off.
* You can turn off _source retrieval by using the _source parameter:
*
*
* GET my-index-000001/_doc/0?_source=false
*
*
*
* If you only need one or two fields from the _source, use the
* _source_includes or _source_excludes parameters to include or filter out
* particular fields. This can be helpful with large documents where partial
* retrieval can save on network overhead. Both parameters take a
* comma-separated list of fields or wildcard expressions. For example:
*
*
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* GET my-index-000001/_doc/0?_source=*.id
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* GET my-index-000001/_doc/2?routing=user1
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the version parameter to retrieve the document only if its
* current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> GetResponse<TDocument> get(Function<GetRequest.Builder, ObjectBuilder<GetRequest>> fn,
        Class<TDocument> tDocumentClass) throws IOException, ElasticsearchException {
return get(fn.apply(new GetRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #get(GetRequest, Class)}, where Class is defined as Void,
* meaning the documents will not be deserialized.
*/
public GetResponse<Void> get(GetRequest request) throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<GetRequest, GetResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<GetRequest, GetResponse<Void>, ErrorResponse>) GetRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #get(Function, Class)}, where Class is defined as Void,
* meaning the documents will not be deserialized.
*/
public final GetResponse<Void> get(Function<GetRequest.Builder, ObjectBuilder<GetRequest>> fn)
throws IOException, ElasticsearchException {
return get(fn.apply(new GetRequest.Builder()).build(), Void.class);
}
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the stored_fields parameter and the
* document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the realtime parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the _source field unless you
* have used the stored_fields parameter or the _source field is turned off.
* You can turn off _source retrieval by using the _source parameter:
*
*
* GET my-index-000001/_doc/0?_source=false
*
*
*
* If you only need one or two fields from the _source, use the
* _source_includes or _source_excludes parameters to include or filter out
* particular fields. This can be helpful with large documents where partial
* retrieval can save on network overhead. Both parameters take a
* comma-separated list of fields or wildcard expressions. For example:
*
*
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* GET my-index-000001/_doc/0?_source=*.id
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* GET my-index-000001/_doc/2?routing=user1
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the version parameter to retrieve the document only if its
* current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> GetResponse<TDocument> get(GetRequest request, Type tDocumentType)
        throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetRequest, GetResponse<TDocument>, ErrorResponse>) GetRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentType));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get a document by its ID.
*
* Get a document and its source or stored fields from an index.
*
* By default, this API is realtime and is not affected by the refresh rate of
* the index (when data will become visible for search). In the case where
* stored fields are requested with the stored_fields parameter and the
* document has been updated but is not yet refreshed, the API will have to
* parse and analyze the source to extract the stored fields. To turn off
* realtime behavior, set the realtime parameter to false.
*
* Source filtering
*
* By default, the API returns the contents of the _source field unless you
* have used the stored_fields parameter or the _source field is turned off.
* You can turn off _source retrieval by using the _source parameter:
*
*
* GET my-index-000001/_doc/0?_source=false
*
*
*
* If you only need one or two fields from the _source, use the
* _source_includes or _source_excludes parameters to include or filter out
* particular fields. This can be helpful with large documents where partial
* retrieval can save on network overhead. Both parameters take a
* comma-separated list of fields or wildcard expressions. For example:
*
*
* GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
*
*
*
* If you only want to specify includes, you can use a shorter notation:
*
*
* GET my-index-000001/_doc/0?_source=*.id
*
*
*
* Routing
*
* If routing is used during indexing, the routing value also needs to be
* specified to retrieve a document. For example:
*
*
* GET my-index-000001/_doc/2?routing=user1
*
*
*
* This request gets the document with ID 2, but it is routed based on the user.
* The document is not fetched if the correct routing is not specified.
*
* Distributed
*
* The GET operation is hashed into a specific shard ID. It is then redirected
* to one of the replicas within that shard ID and returns the result. The
* replicas are the primary shard and its replicas within that shard ID group.
* This means that the more replicas you have, the better your GET scaling will
* be.
*
* Versioning support
*
* You can use the version parameter to retrieve the document only if its
* current version is equal to the specified one.
*
* Internally, Elasticsearch has marked the old document as deleted and added an
* entirely new document. The old version of the document doesn't disappear
* immediately, although you won't be able to access it. Elasticsearch cleans up
* deleted documents in the background as you continue to index more data.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> GetResponse<TDocument> get(Function<GetRequest.Builder, ObjectBuilder<GetRequest>> fn,
        Type tDocumentType) throws IOException, ElasticsearchException {
return get(fn.apply(new GetRequest.Builder()).build(), tDocumentType);
}
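// Illustrative usage sketch (the "Product" document class is an example):
// fetch document 2 with the routing value from the example above, then read
// the deserialized source:
//
//   GetResponse<Product> resp = esClient.get(g -> g
//       .index("my-index-000001")
//       .id("2")
//       .routing("user1"),
//       Product.class);
//   if (resp.found()) {
//       Product p = resp.source();
//   }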
// ----- Endpoint: get_script
/**
* Get a script or search template. Retrieves a stored script or search
* template.
*
* @see Documentation
* on elastic.co
*/
public GetScriptResponse getScript(GetScriptRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<GetScriptRequest, GetScriptResponse, ErrorResponse> endpoint = (JsonEndpoint<GetScriptRequest, GetScriptResponse, ErrorResponse>) GetScriptRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get a script or search template. Retrieves a stored script or search
* template.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetScriptRequest}
* @see Documentation
* on elastic.co
*/
public final GetScriptResponse getScript(Function<GetScriptRequest.Builder, ObjectBuilder<GetScriptRequest>> fn)
throws IOException, ElasticsearchException {
return getScript(fn.apply(new GetScriptRequest.Builder()).build());
}
// ----- Endpoint: get_script_context
/**
* Get script contexts.
*
* Get a list of supported script contexts and their methods.
*
* @see Documentation
* on elastic.co
*/
public GetScriptContextResponse getScriptContext() throws IOException, ElasticsearchException {
return this.transport.performRequest(GetScriptContextRequest._INSTANCE, GetScriptContextRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: get_script_languages
/**
* Get script languages.
*
* Get a list of available script types, languages, and contexts.
*
* @see Documentation
* on elastic.co
*/
public GetScriptLanguagesResponse getScriptLanguages() throws IOException, ElasticsearchException {
return this.transport.performRequest(GetScriptLanguagesRequest._INSTANCE, GetScriptLanguagesRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: get_source
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* GET my-index-000001/_source/1
*
*
*
* You can use the source filtering parameters to control which parts of the
* _source are returned:
*
*
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> GetSourceResponse<TDocument> getSource(GetSourceRequest request, Class<TDocument> tDocumentClass)
        throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse>) GetSourceRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get_source.Response.TDocument",
getDeserializer(tDocumentClass));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* GET my-index-000001/_source/1
*
*
*
* You can use the source filtering parameters to control which parts of the
* _source are returned:
*
*
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link GetSourceRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> GetSourceResponse<TDocument> getSource(
        Function<GetSourceRequest.Builder, ObjectBuilder<GetSourceRequest>> fn, Class<TDocument> tDocumentClass)
throws IOException, ElasticsearchException {
return getSource(fn.apply(new GetSourceRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #getSource(GetSourceRequest, Class)}, where Class is
* defined as Void, meaning the documents will not be deserialized.
*/
public GetSourceResponse<Void> getSource(GetSourceRequest request) throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<GetSourceRequest, GetSourceResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<GetSourceRequest, GetSourceResponse<Void>, ErrorResponse>) GetSourceRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #getSource(Function, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public final GetSourceResponse<Void> getSource(
        Function<GetSourceRequest.Builder, ObjectBuilder<GetSourceRequest>> fn)
throws IOException, ElasticsearchException {
return getSource(fn.apply(new GetSourceRequest.Builder()).build(), Void.class);
}
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* GET my-index-000001/_source/1
*
*
*
* You can use the source filtering parameters to control which parts of the
* _source are returned:
*
*
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> GetSourceResponse<TDocument> getSource(GetSourceRequest request, Type tDocumentType)
        throws IOException, ElasticsearchException {
    @SuppressWarnings("unchecked")
    JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<GetSourceRequest, GetSourceResponse<TDocument>, ErrorResponse>) GetSourceRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.get_source.Response.TDocument",
getDeserializer(tDocumentType));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get a document's source.
*
* Get the source of a document. For example:
*
*
* GET my-index-000001/_source/1
*
*
*
* You can use the source filtering parameters to control which parts of the
* _source are returned:
*
*
* GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link GetSourceRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> GetSourceResponse<TDocument> getSource(
        Function<GetSourceRequest.Builder, ObjectBuilder<GetSourceRequest>> fn, Type tDocumentType)
throws IOException, ElasticsearchException {
return getSource(fn.apply(new GetSourceRequest.Builder()).build(), tDocumentType);
}
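// Illustrative usage sketch (the "Product" document class is an example):
// fetch only the _source of document 1, deserialized as a Product, which is
// the client equivalent of GET my-index-000001/_source/1:
//
//   GetSourceResponse<Product> resp = esClient.getSource(g -> g
//       .index("my-index-000001")
//       .id("1"),
//       Product.class);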
// ----- Endpoint: health_report
/**
* Get the cluster health. Get a report with the health status of an
* Elasticsearch cluster. The report contains a list of indicators that compose
* Elasticsearch functionality.
*
* Each indicator has a health status of: green, unknown, yellow or red. The
* indicator will provide an explanation and metadata describing the reason for
* its current health status.
*
* The cluster’s status is controlled by the worst indicator status.
*
* In the event that an indicator’s status is non-green, a list of impacts may
* be present in the indicator result which detail the functionalities that are
* negatively affected by the health issue. Each impact carries with it a
* severity level, an area of the system that is affected, and a simple
* description of the impact on the system.
*
* Some health indicators can determine the root cause of a health problem and
* prescribe a set of steps that can be performed in order to improve the health
* of the system. The root cause and remediation steps are encapsulated in a
* diagnosis. A diagnosis contains a cause detailing a root cause analysis, an
* action containing a brief description of the steps to take to fix the
* problem, the list of affected resources (if applicable), and a detailed
* step-by-step troubleshooting guide to fix the diagnosed problem.
*
* NOTE: The health indicators perform root cause analysis of non-green health
* statuses. This can be computationally expensive when called frequently. When
* setting up automated polling of the API for health status, set verbose to
* false to disable the more expensive analysis logic.
*
* @see Documentation
* on elastic.co
*/
public HealthReportResponse healthReport(HealthReportRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
    JsonEndpoint<HealthReportRequest, HealthReportResponse, ErrorResponse> endpoint = (JsonEndpoint<HealthReportRequest, HealthReportResponse, ErrorResponse>) HealthReportRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get the cluster health. Get a report with the health status of an
* Elasticsearch cluster. The report contains a list of indicators that compose
* Elasticsearch functionality.
*
* Each indicator has a health status of: green, unknown, yellow or red. The
* indicator will provide an explanation and metadata describing the reason for
* its current health status.
*
* The cluster’s status is controlled by the worst indicator status.
*
* In the event that an indicator’s status is non-green, a list of impacts may
* be present in the indicator result which detail the functionalities that are
* negatively affected by the health issue. Each impact carries with it a
* severity level, an area of the system that is affected, and a simple
* description of the impact on the system.
*
* Some health indicators can determine the root cause of a health problem and
* prescribe a set of steps that can be performed in order to improve the health
* of the system. The root cause and remediation steps are encapsulated in a
* diagnosis. A diagnosis contains a cause detailing a root cause analysis, an
* action containing a brief description of the steps to take to fix the
* problem, the list of affected resources (if applicable), and a detailed
* step-by-step troubleshooting guide to fix the diagnosed problem.
*
* NOTE: The health indicators perform root cause analysis of non-green health
* statuses. This can be computationally expensive when called frequently. When
* setting up automated polling of the API for health status, set verbose to
* false to disable the more expensive analysis logic.
*
* @param fn
* a function that initializes a builder to create the
* {@link HealthReportRequest}
* @see Documentation
* on elastic.co
*/
public final HealthReportResponse healthReport(
        Function<HealthReportRequest.Builder, ObjectBuilder<HealthReportRequest>> fn)
throws IOException, ElasticsearchException {
return healthReport(fn.apply(new HealthReportRequest.Builder()).build());
}
/**
* Get the cluster health. Get a report with the health status of an
* Elasticsearch cluster. The report contains a list of indicators that compose
* Elasticsearch functionality.
*
* Each indicator has a health status of: green, unknown, yellow or red. The
* indicator will provide an explanation and metadata describing the reason for
* its current health status.
*
* The cluster’s status is controlled by the worst indicator status.
*
* In the event that an indicator’s status is non-green, a list of impacts may
* be present in the indicator result which detail the functionalities that are
* negatively affected by the health issue. Each impact carries with it a
* severity level, an area of the system that is affected, and a simple
* description of the impact on the system.
*
* Some health indicators can determine the root cause of a health problem and
* prescribe a set of steps that can be performed in order to improve the health
* of the system. The root cause and remediation steps are encapsulated in a
* diagnosis. A diagnosis contains a cause detailing a root cause analysis, an
* action containing a brief description of the steps to take to fix the
* problem, the list of affected resources (if applicable), and a detailed
* step-by-step troubleshooting guide to fix the diagnosed problem.
*
* NOTE: The health indicators perform root cause analysis of non-green health
* statuses. This can be computationally expensive when called frequently. When
* setting up automated polling of the API for health status, set verbose to
* false to disable the more expensive analysis logic.
*
* @see Documentation
* on elastic.co
*/
public HealthReportResponse healthReport() throws IOException, ElasticsearchException {
return this.transport.performRequest(new HealthReportRequest.Builder().build(), HealthReportRequest._ENDPOINT,
this.transportOptions);
}
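// Illustrative usage sketch: when polling the API for health status, disable
// the more expensive root-cause analysis as recommended above:
//
//   HealthReportResponse resp = esClient.healthReport(h -> h.verbose(false));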
// ----- Endpoint: index
/**
* Create or update a document in an index.
*
* Add a JSON document to the specified data stream or index and make it
* searchable. If the target is an index and the document already exists, the
* request updates the document and increments its version.
*
* NOTE: You cannot use this API to send update requests for existing documents
* in a data stream.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
*
* - To add or overwrite a document using the PUT /<target>/_doc/<_id> request
* format, you must have the create, index, or write index privilege.
* - To add a document using the POST /<target>/_doc/ request format, you must
* have the create_doc, create, index, or write index privilege.
* - To automatically create a data stream or index with this API request, you
* must have the auto_configure, create_index, or manage index privilege.
*
* Automatic data stream creation requires a matching index template with data
* stream enabled.
*
* NOTE: Replica shards might not all be started when an indexing operation
* returns successfully. By default, only the primary is required. Set
* wait_for_active_shards to change this default behavior.
*
* Automatically create data streams and indices
*
* If the request's target doesn't exist and matches an index template with a
* data_stream definition, the index operation automatically creates the data
* stream.
*
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
*
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
*
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
*
* Automatic index creation is controlled by the action.auto_create_index
* setting. If it is true, any index can be created automatically. You can
* modify this setting to explicitly allow or block automatic creation of
* indices that match specified patterns, or set it to false to turn off
* automatic index creation entirely. Specify a comma-separated list of
* patterns you want to allow, or prefix each pattern with + or - to indicate
* whether it should be allowed or blocked. When a list is specified, the
* default behaviour is to disallow.
*
* NOTE: The action.auto_create_index setting affects the automatic creation of
* indices only. It does not affect the creation of data streams.
*
* Optimistic concurrency control
*
* Index operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the if_seq_no and if_primary_term parameters. If a
* mismatch is detected, the operation will result in a
* VersionConflictException and a status code of 409.
*
* Routing
*
* By default, shard placement (or routing) is controlled by using a hash of
* the document's ID value. For more explicit control, the value fed into the
* hash function used by the router can be directly specified on a
* per-operation basis using the routing parameter.
*
* When setting up explicit mapping, you can also use the _routing
* field to direct the index operation to extract the routing value from the
* document itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the _routing mapping is defined and set to be
* required, the index operation will fail if no routing value is provided or
* extracted.
*
* NOTE: Data streams do not support custom routing unless they were created
* with the allow_custom_routing setting enabled in the template.
*
* Distributed
*
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
*
* Active shards
*
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say wait_for_active_shards
is
* 1
). This default can be overridden in the index settings
* dynamically by setting index.write.wait_for_active_shards
. To
* alter this behavior per operation, use the
* wait_for_active_shards request
parameter.
*
* Valid values are all or any positive integer up to the total number of
* configured copies per shard in the index (which is
* number_of_replicas
+1). Specifying a negative value or a number
* greater than the number of shard copies will throw an error.
*
* For example, suppose you have a cluster of three nodes, A, B, and C and you
* create an index index with the number of replicas set to 3 (resulting in 4
* shard copies, one more copy than there are nodes). If you attempt an indexing
* operation, by default the operation will only ensure the primary copy of each
* shard is available before proceeding. This means that even if B and C went
* down and A hosted the primary shard copies, the indexing operation would
* still proceed with only one copy of the data. If wait_for_active_shards
* is set on the request to 3 (and all three nodes are up), the indexing
* operation will require 3 active shard copies before proceeding. This
* requirement should be met because there are 3 active nodes in the cluster,
* each one holding a copy of the shard. However, if you set
* wait_for_active_shards to all (or to 4, which is the same in this
* situation), the indexing operation will not proceed as you do not have all
* 4 copies of each shard active in the index. The operation will time out
* unless a new node is brought up in the cluster to host the fourth copy of
* the shard.
*
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The _shards section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
*
* No operation (noop) updates
*
* When updating a document by using this API, a new version of the document is
* always created even if the document hasn't changed. If this isn't
* acceptable, use the _update API with detect_noop set to true. The
* detect_noop option isn't available on this API because it doesn't fetch
* the old source and isn't able to compare it against the new source.
*
* There isn't a definitive rule for when noop updates aren't acceptable. It's a
* combination of lots of factors like how frequently your data source sends
* updates that are actually noops and how many queries per second Elasticsearch
* runs on the shard receiving the updates.
*
* Versioning
*
* Each indexed document is given a version number. By default, internal
* versioning is used that starts at 1 and increments with each update, deletes
* included. Optionally, the version number can be set to an external value (for
* example, if maintained in a database). To enable this functionality,
* version_type should be set to external. The value provided must be a
* numeric, long value greater than or equal to 0, and less than around
* 9.2e+18.
*
* NOTE: Versioning is completely real time, and is not affected by the near
* real time aspects of search operations. If no version is provided, the
* operation runs without any version checks.
*
* When using the external version type, the system checks to see if the version
* number passed to the index request is greater than the version of the
* currently stored document. If true, the document will be indexed and the new
* version number used. If the value provided is less than or equal to the
* stored document's version number, a version conflict will occur and the index
* operation will fail. For example:
*
*
* PUT my-index-000001/_doc/1?version=2&version_type=external
* {
* "user": {
* "id": "elkbee"
* }
* }
*
* In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
* If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
*
* A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
* Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> IndexResponse index(IndexRequest<TDocument> request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<IndexRequest<?>, IndexResponse, ErrorResponse> endpoint = (JsonEndpoint<IndexRequest<?>, IndexResponse, ErrorResponse>) IndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create or update a document in an index.
*
* Add a JSON document to the specified data stream or index and make it
* searchable. If the target is an index and the document already exists, the
* request updates the document and increments its version.
*
* NOTE: You cannot use this API to send update requests for existing documents
* in a data stream.
*
* If the Elasticsearch security features are enabled, you must have the
* following index privileges for the target data stream, index, or index alias:
*
* - To add or overwrite a document using the PUT /<target>/_doc/<_id>
* request format, you must have the create, index, or write index
* privilege.
* - To add a document using the POST /<target>/_doc/ request format, you
* must have the create_doc, create, index, or write index privilege.
* - To automatically create a data stream or index with this API request,
* you must have the auto_configure, create_index, or manage index
* privilege.
*
*
* Automatic data stream creation requires a matching index template with data
* stream enabled.
*
* NOTE: Replica shards might not all be started when an indexing operation
* returns successfully. By default, only the primary is required. Set
* wait_for_active_shards to change this default behavior.
*
* Automatically create data streams and indices
*
* If the request's target doesn't exist and matches an index template with a
* data_stream definition, the index operation automatically
* creates the data stream.
*
* If the target doesn't exist and doesn't match a data stream template, the
* operation automatically creates the index and applies any matching index
* templates.
*
* NOTE: Elasticsearch includes several built-in index templates. To avoid
* naming collisions with these templates, refer to index pattern documentation.
*
* If no mapping exists, the index operation creates a dynamic mapping. By
* default, new fields and objects are automatically added to the mapping if
* needed.
*
* Automatic index creation is controlled by the
* action.auto_create_index setting. If it is true, any index can be created
* automatically. You can modify this setting to explicitly allow or block
* automatic creation of indices that match specified patterns or set it to
* false to turn off automatic index creation entirely. Specify a
* comma-separated list of patterns you want to allow or prefix each pattern
* with + or - to indicate whether it should be allowed or blocked. When a
* list is specified, the default behaviour is to disallow.
*
* NOTE: The action.auto_create_index setting affects the automatic
* creation of indices only. It does not affect the creation of data streams.
*
* Optimistic concurrency control
*
* Index operations can be made conditional and only be performed if the last
* modification to the document was assigned the sequence number and primary
* term specified by the if_seq_no and if_primary_term parameters. If a
* mismatch is detected, the operation will result in a
* VersionConflictException and a status code of 409.
*
* Routing
*
* By default, shard placement — or routing — is controlled by using a hash of
* the document's ID value. For more explicit control, the value fed into the
* hash function used by the router can be directly specified on a per-operation
* basis using the routing parameter.
*
* When setting up explicit mapping, you can also use the _routing field to
* direct the index operation to extract the routing value from the document
* itself. This does come at the (very minimal) cost of an additional
* document parsing pass. If the _routing mapping is defined and set to be
* required, the index operation will fail if no routing value is provided
* or extracted.
*
* NOTE: Data streams do not support custom routing unless they were created
* with the allow_custom_routing setting enabled in the template.
*
* Distributed
*
* The index operation is directed to the primary shard based on its route and
* performed on the actual node containing this shard. After the primary shard
* completes the operation, if needed, the update is distributed to applicable
* replicas.
*
* Active shards
*
* To improve the resiliency of writes to the system, indexing operations can be
* configured to wait for a certain number of active shard copies before
* proceeding with the operation. If the requisite number of active shard copies
* are not available, then the write operation must wait and retry, until either
* the requisite shard copies have started or a timeout occurs. By default,
* write operations only wait for the primary shards to be active before
* proceeding (that is to say wait_for_active_shards is 1). This default can
* be overridden in the index settings dynamically by setting
* index.write.wait_for_active_shards. To alter this behavior per operation,
* use the wait_for_active_shards request parameter.
*
* Valid values are all or any positive integer up to the total number of
* configured copies per shard in the index (which is number_of_replicas+1).
* Specifying a negative value or a number greater than the number of shard
* copies will throw an error.
*
* For example, suppose you have a cluster of three nodes, A, B, and C and you
* create an index index with the number of replicas set to 3 (resulting in 4
* shard copies, one more copy than there are nodes). If you attempt an indexing
* operation, by default the operation will only ensure the primary copy of each
* shard is available before proceeding. This means that even if B and C went
* down and A hosted the primary shard copies, the indexing operation would
* still proceed with only one copy of the data. If wait_for_active_shards
* is set on the request to 3 (and all three nodes are up), the indexing
* operation will require 3 active shard copies before proceeding. This
* requirement should be met because there are 3 active nodes in the cluster,
* each one holding a copy of the shard. However, if you set
* wait_for_active_shards to all (or to 4, which is the same in this
* situation), the indexing operation will not proceed as you do not have all
* 4 copies of each shard active in the index. The operation will time out
* unless a new node is brought up in the cluster to host the fourth copy of
* the shard.
*
* It is important to note that this setting greatly reduces the chances of the
* write operation not writing to the requisite number of shard copies, but it
* does not completely eliminate the possibility, because this check occurs
* before the write operation starts. After the write operation is underway, it
* is still possible for replication to fail on any number of shard copies but
* still succeed on the primary. The _shards section of the API
* response reveals the number of shard copies on which replication succeeded
* and failed.
*
* No operation (noop) updates
*
* When updating a document by using this API, a new version of the document is
* always created even if the document hasn't changed. If this isn't
* acceptable, use the _update API with detect_noop set to true. The
* detect_noop option isn't available on this API because it doesn't fetch
* the old source and isn't able to compare it against the new source.
*
* There isn't a definitive rule for when noop updates aren't acceptable. It's a
* combination of lots of factors like how frequently your data source sends
* updates that are actually noops and how many queries per second Elasticsearch
* runs on the shard receiving the updates.
*
* Versioning
*
* Each indexed document is given a version number. By default, internal
* versioning is used that starts at 1 and increments with each update, deletes
* included. Optionally, the version number can be set to an external value (for
* example, if maintained in a database). To enable this functionality,
* version_type should be set to external. The value provided must be a
* numeric, long value greater than or equal to 0, and less than around
* 9.2e+18.
*
* NOTE: Versioning is completely real time, and is not affected by the near
* real time aspects of search operations. If no version is provided, the
* operation runs without any version checks.
*
* When using the external version type, the system checks to see if the version
* number passed to the index request is greater than the version of the
* currently stored document. If true, the document will be indexed and the new
* version number used. If the value provided is less than or equal to the
* stored document's version number, a version conflict will occur and the index
* operation will fail. For example:
*
*
* PUT my-index-000001/_doc/1?version=2&version_type=external
* {
* "user": {
* "id": "elkbee"
* }
* }
*
* In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
* If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
*
* A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
* Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
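*
* For comparison, a sketch of the example above expressed through this Java
* client (index, ID, and version values are taken from the example; the
* VersionType enum and Map document are assumptions about the caller's
* setup):
*
* client.index(i -> i
*     .index("my-index-000001")
*     .id("1")
*     .version(2L)                        // external version to store
*     .versionType(VersionType.External)
*     .document(Map.of("user", Map.of("id", "elkbee"))));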
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link IndexRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> IndexResponse index(
Function<IndexRequest.Builder<TDocument>, ObjectBuilder<IndexRequest<TDocument>>> fn)
throws IOException, ElasticsearchException {
return index(fn.apply(new IndexRequest.Builder<TDocument>()).build());
}
// ----- Endpoint: info
/**
* Get cluster info. Get basic build, version, and cluster information.
*
* @see Documentation
* on elastic.co
*/
public InfoResponse info() throws IOException, ElasticsearchException {
return this.transport.performRequest(InfoRequest._INSTANCE, InfoRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: mget
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored.
* You can include the stored_fields query parameter in the request URI to
* specify the defaults to use when there are no per-document instructions.
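*
* A minimal usage sketch (assuming an ElasticsearchClient named client and
* a Product document class; the index name and IDs are hypothetical):
*
* MgetResponse<Product> resp = client.mget(m -> m
*     .index("products")
*     .ids("1", "2", "3"),
*     Product.class);
* for (MultiGetResponseItem<Product> item : resp.docs()) {
*     if (item.isResult() && item.result().found()) {
*         Product p = item.result().source();   // deserialized document
*     }
* }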
*
* @see Documentation
* on elastic.co
*/
public <TDocument> MgetResponse<TDocument> mget(MgetRequest request, Class<TDocument> tDocumentClass)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse>) MgetRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.mget.Response.TDocument", getDeserializer(tDocumentClass));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored.
* You can include the stored_fields query parameter in the request URI to
* specify the defaults to use when there are no per-document instructions.
*
* @param fn
* a function that initializes a builder to create the
* {@link MgetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> MgetResponse<TDocument> mget(Function<MgetRequest.Builder, ObjectBuilder<MgetRequest>> fn,
Class<TDocument> tDocumentClass) throws IOException, ElasticsearchException {
return mget(fn.apply(new MgetRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #mget(MgetRequest, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public MgetResponse<Void> mget(MgetRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MgetRequest, MgetResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<MgetRequest, MgetResponse<Void>, ErrorResponse>) MgetRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #mget(Function, Class)}, where Class is defined as Void,
* meaning the documents will not be deserialized.
*/
public final MgetResponse<Void> mget(Function<MgetRequest.Builder, ObjectBuilder<MgetRequest>> fn)
throws IOException, ElasticsearchException {
return mget(fn.apply(new MgetRequest.Builder()).build(), Void.class);
}
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored.
* You can include the stored_fields query parameter in the request URI to
* specify the defaults to use when there are no per-document instructions.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> MgetResponse<TDocument> mget(MgetRequest request, Type tDocumentType)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MgetRequest, MgetResponse<TDocument>, ErrorResponse>) MgetRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.mget.Response.TDocument", getDeserializer(tDocumentType));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get multiple documents.
*
* Get multiple JSON documents by ID from one or more indices. If you specify an
* index in the request URI, you only need to specify the document IDs in the
* request body. To ensure fast responses, this multi get (mget) API responds
* with partial results if one or more shards fail.
*
* Filter source fields
*
* By default, the _source field is returned for every document (if stored).
* Use the _source and _source_include or source_exclude attributes to filter
* what fields are returned for a particular document. You can include the
* _source, _source_includes, and _source_excludes query parameters in the
* request URI to specify the defaults to use when there are no per-document
* instructions.
*
* Get stored fields
*
* Use the stored_fields attribute to specify the set of stored fields you
* want to retrieve. Any requested fields that are not stored are ignored.
* You can include the stored_fields query parameter in the request URI to
* specify the defaults to use when there are no per-document instructions.
*
* @param fn
* a function that initializes a builder to create the
* {@link MgetRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> MgetResponse<TDocument> mget(Function<MgetRequest.Builder, ObjectBuilder<MgetRequest>> fn,
Type tDocumentType) throws IOException, ElasticsearchException {
return mget(fn.apply(new MgetRequest.Builder()).build(), tDocumentType);
}
// ----- Endpoint: msearch
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint the Content-Type header should be set
* to application/x-ndjson.
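*
* A minimal usage sketch (assuming an ElasticsearchClient named client and
* a Product document class; index and query values are hypothetical):
*
* MsearchResponse<Product> resp = client.msearch(m -> m
*     .searches(s -> s
*         .header(h -> h.index("products"))
*         .body(b -> b.query(q -> q.matchAll(ma -> ma))))
*     .searches(s -> s
*         .header(h -> h.index("products"))
*         .body(b -> b.query(q -> q.match(mq -> mq.field("name").query("wrench"))))),
*     Product.class);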
*
* @see Documentation
* on elastic.co
*/
public <TDocument> MsearchResponse<TDocument> msearch(MsearchRequest request, Class<TDocument> tDocumentClass)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse>) MsearchRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.msearch.Response.TDocument", getDeserializer(tDocumentClass));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint the Content-Type header should be set
* to application/x-ndjson.
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> MsearchResponse<TDocument> msearch(
Function<MsearchRequest.Builder, ObjectBuilder<MsearchRequest>> fn, Class<TDocument> tDocumentClass)
throws IOException, ElasticsearchException {
return msearch(fn.apply(new MsearchRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #msearch(MsearchRequest, Class)}, where Class is defined
* as Void, meaning the documents will not be deserialized.
*/
public MsearchResponse<Void> msearch(MsearchRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MsearchRequest, MsearchResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<MsearchRequest, MsearchResponse<Void>, ErrorResponse>) MsearchRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #msearch(Function, Class)}, where Class is defined as
* Void, meaning the documents will not be deserialized.
*/
public final MsearchResponse<Void> msearch(Function<MsearchRequest.Builder, ObjectBuilder<MsearchRequest>> fn)
throws IOException, ElasticsearchException {
return msearch(fn.apply(new MsearchRequest.Builder()).build(), Void.class);
}
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint the Content-Type header should be set
* to application/x-ndjson.
*
* @see Documentation
* on elastic.co
*/
public <TDocument> MsearchResponse<TDocument> msearch(MsearchRequest request, Type tDocumentType)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchRequest, MsearchResponse<TDocument>, ErrorResponse>) MsearchRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.msearch.Response.TDocument", getDeserializer(tDocumentType));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Run multiple searches.
*
* The format of the request is similar to the bulk API format and makes use of
* the newline delimited JSON (NDJSON) format. The structure is as follows:
*
*
* header\n
* body\n
* header\n
* body\n
*
*
*
* This structure is specifically optimized to reduce parsing if a specific
* search ends up redirected to another node.
*
* IMPORTANT: The final line of data must end with a newline character \n.
* Each newline character may be preceded by a carriage return \r. When
* sending requests to this endpoint the Content-Type header should be set
* to application/x-ndjson.
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> MsearchResponse<TDocument> msearch(
Function<MsearchRequest.Builder, ObjectBuilder<MsearchRequest>> fn, Type tDocumentType)
throws IOException, ElasticsearchException {
return msearch(fn.apply(new MsearchRequest.Builder()).build(), tDocumentType);
}
// ----- Endpoint: msearch_template
/**
* Run multiple templated searches.
*
* Run multiple templated searches with a single request. If you are providing
* a text file or text input to curl, use the --data-binary flag instead of
* -d to preserve newlines. For example:
*
*
* $ cat requests
* { "index": "my-index" }
* { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
* { "index": "my-other-index" }
* { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
*
* $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
*
*
*
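* A sketch of an equivalent call through this Java client (the template ID,
* index name, and params are hypothetical and assume the template is
* already stored; JsonData is co.elastic.clients.json.JsonData):
*
* MsearchTemplateResponse<Product> resp = client.msearchTemplate(m -> m
*     .searchTemplates(t -> t
*         .header(h -> h.index("my-index"))
*         .body(b -> b
*             .id("my-search-template")
*             .params("query_string", JsonData.of("hello world")))),
*     Product.class);
*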
* @see Documentation
* on elastic.co
*/
public <TDocument> MsearchTemplateResponse<TDocument> msearchTemplate(MsearchTemplateRequest request,
Class<TDocument> tDocumentClass) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<TDocument>, ErrorResponse>) MsearchTemplateRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.msearch_template.Response.TDocument",
getDeserializer(tDocumentClass));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Run multiple templated searches.
*
* Run multiple templated searches with a single request. If you are providing
* a text file or text input to curl, use the --data-binary flag instead of
* -d to preserve newlines. For example:
*
*
* $ cat requests
* { "index": "my-index" }
* { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
* { "index": "my-other-index" }
* { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
*
* $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> MsearchTemplateResponse<TDocument> msearchTemplate(
Function<MsearchTemplateRequest.Builder, ObjectBuilder<MsearchTemplateRequest>> fn,
Class<TDocument> tDocumentClass) throws IOException, ElasticsearchException {
return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), tDocumentClass);
}
/**
* Overload of {@link #msearchTemplate(MsearchTemplateRequest, Class)}, where
* Class is defined as Void, meaning the documents will not be deserialized.
*/
public MsearchTemplateResponse<Void> msearchTemplate(MsearchTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<Void>, ErrorResponse> endpoint = (JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<Void>, ErrorResponse>) MsearchTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Overload of {@link #msearchTemplate(Function, Class)}, where Class is defined
* as Void, meaning the documents will not be deserialized.
*/
public final MsearchTemplateResponse<Void> msearchTemplate(
Function<MsearchTemplateRequest.Builder, ObjectBuilder<MsearchTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), Void.class);
}
/**
* Run multiple templated searches.
*
* Run multiple templated searches with a single request. If you are providing
* a text file or text input to curl, use the --data-binary flag instead of
* -d to preserve newlines. For example:
*
*
* $ cat requests
* { "index": "my-index" }
* { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
* { "index": "my-other-index" }
* { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
*
* $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
*
*
*
* @see Documentation
* on elastic.co
*/
public <TDocument> MsearchTemplateResponse<TDocument> msearchTemplate(MsearchTemplateRequest request,
Type tDocumentType) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<TDocument>, ErrorResponse> endpoint = (JsonEndpoint<MsearchTemplateRequest, MsearchTemplateResponse<TDocument>, ErrorResponse>) MsearchTemplateRequest._ENDPOINT;
endpoint = new EndpointWithResponseMapperAttr<>(endpoint,
"co.elastic.clients:Deserializer:_global.msearch_template.Response.TDocument",
getDeserializer(tDocumentType));
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Run multiple templated searches.
*
* Run multiple templated searches with a single request. If you are providing
* a text file or text input to curl, use the --data-binary flag instead of
* -d to preserve newlines. For example:
*
*
* $ cat requests
* { "index": "my-index" }
* { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
* { "index": "my-other-index" }
* { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
*
* $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link MsearchTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final <TDocument> MsearchTemplateResponse<TDocument> msearchTemplate(
Function<MsearchTemplateRequest.Builder, ObjectBuilder<MsearchTemplateRequest>> fn, Type tDocumentType)
throws IOException, ElasticsearchException {
return msearchTemplate(fn.apply(new MsearchTemplateRequest.Builder()).build(), tDocumentType);
}
// ----- Endpoint: mtermvectors
/**
* Get multiple term vectors.
*
* Get multiple term vectors with a single request. You can specify existing
* documents by index and ID or provide artificial documents in the body of the
* request. You can specify the index in the request body or request URI. The
* response contains a docs array with all the fetched termvectors. Each
* element has the structure provided by the termvectors API.
*
* Artificial documents
*
* You can also use mtermvectors to generate term vectors for artificial
* documents provided in the body of the request. The mapping used is
* determined by the specified _index.
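*
* A minimal usage sketch (assuming an ElasticsearchClient named client; the
* index name and document IDs are hypothetical):
*
* MtermvectorsResponse resp = client.mtermvectors(m -> m
*     .index("my-index-000001")
*     .docs(d -> d.id("1"))
*     .docs(d -> d.id("2")));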
*
* @see Documentation
* on elastic.co
*/
public MtermvectorsResponse mtermvectors(MtermvectorsRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MtermvectorsRequest, MtermvectorsResponse, ErrorResponse> endpoint = (JsonEndpoint<MtermvectorsRequest, MtermvectorsResponse, ErrorResponse>) MtermvectorsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get multiple term vectors.
*
* Get multiple term vectors with a single request. You can specify existing
* documents by index and ID or provide artificial documents in the body of the
* request. You can specify the index in the request body or request URI. The
* response contains a docs array with all the fetched termvectors. Each
* element has the structure provided by the termvectors API.
*
* Artificial documents
*
* You can also use mtermvectors to generate term vectors for artificial
* documents provided in the body of the request. The mapping used is
* determined by the specified _index.
*
* @param fn
* a function that initializes a builder to create the
* {@link MtermvectorsRequest}
* @see Documentation
* on elastic.co
*/
public final MtermvectorsResponse mtermvectors(
Function<MtermvectorsRequest.Builder, ObjectBuilder<MtermvectorsRequest>> fn)
throws IOException, ElasticsearchException {
return mtermvectors(fn.apply(new MtermvectorsRequest.Builder()).build());
}
/**
* Get multiple term vectors.
*
* Get multiple term vectors with a single request. You can specify existing
* documents by index and ID or provide artificial documents in the body of the
* request. You can specify the index in the request body or request URI. The
* response contains a docs array with all the fetched termvectors. Each
* element has the structure provided by the termvectors API.
*
* Artificial documents
*
* You can also use mtermvectors to generate term vectors for artificial
* documents provided in the body of the request. The mapping used is
* determined by the specified _index.
*
* @see Documentation
* on elastic.co
*/
public MtermvectorsResponse mtermvectors() throws IOException, ElasticsearchException {
return this.transport.performRequest(new MtermvectorsRequest.Builder().build(), MtermvectorsRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: open_point_in_time
/**
* Open a point in time.
*
* A search request by default runs against the most recent visible data of the
* target indices, which is called point in time. Elasticsearch pit (point in
* time) is a lightweight view into the state of the data as it existed when
* initiated. In some cases, it’s preferred to perform multiple search requests
* using the same point in time. For example, if refreshes happen between
* search_after requests, then the results of those requests might
* not be consistent as changes happening between searches are only visible to
* the more recent point in time.
*
* A point in time must be opened explicitly before being used in search
* requests.
*
* A subsequent search request with the pit parameter must not specify
* index, routing, or preference values as these parameters are copied
* from the point in time.
*
* Just like regular searches, you can use from and size to page through
* point in time search results, up to the first 10,000 hits. If you want
* to retrieve more hits, use PIT with search_after.
*
* IMPORTANT: The open point in time request and each subsequent search request
* can return different identifiers; always use the most recently received ID
* for the next search request.
*
* When a PIT that contains shard failures is used in a search request, the
* missing shards are always reported in the search response as a
* NoShardAvailableActionException exception. To get rid of these
* exceptions, a new PIT needs to be created so that shards missing from the
* previous PIT can be handled, assuming they become available in the meantime.
*
* Keeping point in time alive
*
* The keep_alive parameter, which is passed to an open point in time
* request and search request, extends the time to live of the
* corresponding point in time. The value does not need to be long enough to
* process all data — it just needs to be long enough for the next request.
*
* Normally, the background merge process optimizes the index by merging
* together smaller segments to create new, bigger segments. Once the smaller
* segments are no longer needed they are deleted. However, open point-in-times
* prevent the old segments from being deleted since they are still in use.
*
* TIP: Keeping older segments alive means that more disk space and file handles
* are needed. Ensure that you have configured your nodes to have ample free
* file handles.
*
* Additionally, if a segment contains deleted or updated documents then the
* point in time must keep track of whether each document in the segment was
* live at the time of the initial search request. Ensure that your nodes have
* sufficient heap space if you have many open point-in-times on an index that
* is subject to ongoing deletes or updates. Note that a point-in-time doesn't
* prevent its associated indices from being deleted. You can check how many
* point-in-times (that is, search contexts) are open with the nodes stats API.
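*
* A minimal usage sketch (assuming an ElasticsearchClient named client; the
* index name and keep-alive value are hypothetical):
*
* OpenPointInTimeResponse pit = client.openPointInTime(p -> p
*     .index("my-index-000001")
*     .keepAlive(k -> k.time("1m")));
* // pass pit.id() to subsequent search requests, then close it when done:
* client.closePointInTime(c -> c.id(pit.id()));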
*
* @see Documentation
* on elastic.co
*/
public OpenPointInTimeResponse openPointInTime(OpenPointInTimeRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<OpenPointInTimeRequest, OpenPointInTimeResponse, ErrorResponse> endpoint = (JsonEndpoint<OpenPointInTimeRequest, OpenPointInTimeResponse, ErrorResponse>) OpenPointInTimeRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Open a point in time.
*
* A search request by default runs against the most recent visible data of the
* target indices, which is called point in time. Elasticsearch pit (point in
* time) is a lightweight view into the state of the data as it existed when
* initiated. In some cases, it’s preferred to perform multiple search requests
* using the same point in time. For example, if refreshes happen between
* search_after requests, then the results of those requests might
* not be consistent as changes happening between searches are only visible to
* the more recent point in time.
*
* A point in time must be opened explicitly before being used in search
* requests.
*
* A subsequent search request with the pit parameter must not specify
* index, routing, or preference values as these parameters are copied
* from the point in time.
*
* Just like regular searches, you can use from and size to page through
* point in time search results, up to the first 10,000 hits. If you want
* to retrieve more hits, use PIT with search_after.
*
* IMPORTANT: The open point in time request and each subsequent search request
* can return different identifiers; always use the most recently received ID
* for the next search request.
*
* When a PIT that contains shard failures is used in a search request, the
* missing shards are always reported in the search response as a
* NoShardAvailableActionException exception. To get rid of these
* exceptions, a new PIT needs to be created so that shards missing from the
* previous PIT can be handled, assuming they become available in the meantime.
*
* Keeping point in time alive
*
* The keep_alive parameter, which is passed to an open point in time
* request and search request, extends the time to live of the
* corresponding point in time. The value does not need to be long enough to
* process all data — it just needs to be long enough for the next request.
*
* Normally, the background merge process optimizes the index by merging
* together smaller segments to create new, bigger segments. Once the smaller
* segments are no longer needed they are deleted. However, open point-in-times
* prevent the old segments from being deleted since they are still in use.
*
* TIP: Keeping older segments alive means that more disk space and file handles
* are needed. Ensure that you have configured your nodes to have ample free
* file handles.
*
* Additionally, if a segment contains deleted or updated documents then the
* point in time must keep track of whether each document in the segment was
* live at the time of the initial search request. Ensure that your nodes have
* sufficient heap space if you have many open point-in-times on an index that
* is subject to ongoing deletes or updates. Note that a point-in-time doesn't
* prevent its associated indices from being deleted. You can check how many
* point-in-times (that is, search contexts) are open with the nodes stats API.
*
* @param fn
* a function that initializes a builder to create the
* {@link OpenPointInTimeRequest}
* @see Documentation
* on elastic.co
*/
public final OpenPointInTimeResponse openPointInTime(
Function<OpenPointInTimeRequest.Builder, ObjectBuilder<OpenPointInTimeRequest>> fn)
throws IOException, ElasticsearchException {
return openPointInTime(fn.apply(new OpenPointInTimeRequest.Builder()).build());
}
// ----- Endpoint: ping
/**
* Ping the cluster. Get information about whether the cluster is running.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse ping() throws IOException, ElasticsearchException {
return this.transport.performRequest(PingRequest._INSTANCE, PingRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: put_script
/**
* Create or update a script or search template. Creates or updates a stored
* script or search template.
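*
* A minimal usage sketch (assuming an ElasticsearchClient named client; the
* script ID, language, and source are hypothetical, and the exact builder
* setters may vary slightly across client versions):
*
* client.putScript(p -> p
*     .id("my-calc-script")
*     .script(s -> s
*         .lang("painless")
*         .source("doc['my_field'].value * params.factor")));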
*
* @see Documentation
* on elastic.co
*/
public PutScriptResponse putScript(PutScriptRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutScriptRequest, PutScriptResponse, ErrorResponse> endpoint = (JsonEndpoint<PutScriptRequest, PutScriptResponse, ErrorResponse>) PutScriptRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create or update a script or search template. Creates or updates a stored
* script or search template.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutScriptRequest}
* @see Documentation
* on elastic.co
*/
public final PutScriptResponse putScript(Function<PutScriptRequest.Builder, ObjectBuilder<PutScriptRequest>> fn)
throws IOException, ElasticsearchException {
return putScript(fn.apply(new PutScriptRequest.Builder()).build());
}
// ----- Endpoint: rank_eval
/**
* Evaluate ranked search results.
*
* Evaluate the quality of ranked search results over a set of typical search
* queries.
*
* @see Documentation
* on elastic.co
*/
public RankEvalResponse rankEval(RankEvalRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<RankEvalRequest, RankEvalResponse, ErrorResponse> endpoint = (JsonEndpoint<RankEvalRequest, RankEvalResponse, ErrorResponse>) RankEvalRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Evaluate ranked search results.
*
* Evaluate the quality of ranked search results over a set of typical search
* queries.
*
* @param fn
* a function that initializes a builder to create the
* {@link RankEvalRequest}
* @see Documentation
* on elastic.co
*/
public final RankEvalResponse rankEval(Function<RankEvalRequest.Builder, ObjectBuilder<RankEvalRequest>> fn)
throws IOException, ElasticsearchException {
return rankEval(fn.apply(new RankEvalRequest.Builder()).build());
}
// ----- Endpoint: reindex
/**
* Reindex documents.
*
* Copy documents from a source to a destination. You can copy all documents to
* the destination index or reindex a subset of the documents. The source can be
* any existing index, alias, or data stream. The destination must differ from
* the source. For example, you cannot reindex a data stream into itself.
*
* IMPORTANT: Reindex requires _source to be enabled for all
* documents in the source. The destination should be configured as wanted
* before calling the reindex API. Reindex does not copy the settings from the
* source or its associated template. Mappings, shard counts, and replicas, for
* example, must be configured ahead of time.
*
* If the Elasticsearch security features are enabled, you must have the
* following security privileges:
*
* - The read index privilege for the source data stream, index, or alias.
* - The write index privilege for the destination data stream, index, or
* index alias.
* - To automatically create a data stream or index with a reindex API
* request, you must have the auto_configure, create_index, or manage index
* privilege for the destination data stream, index, or alias.
* - If reindexing from a remote cluster, the source.remote.user must have
* the monitor cluster privilege and the read index privilege for the source
* data stream, index, or alias.
*
*
* If reindexing from a remote cluster, you must explicitly allow the remote
* host in the reindex.remote.whitelist setting. Automatic data
* stream creation requires a matching index template with data stream enabled.
*
* The dest element can be configured like the index API to control
* optimistic concurrency control. Omitting version_type or setting it to
* internal causes Elasticsearch to blindly dump documents into the
* destination, overwriting any that happen to have the same ID.
*
* Setting version_type to external causes Elasticsearch to preserve the
* version from the source, create any documents that are missing, and
* update any documents that have an older version in the destination than
* they do in the source.
*
* Setting op_type to create causes the reindex API to create only missing
* documents in the destination. All existing documents will cause a version
* conflict.
*
* IMPORTANT: Because data streams are append-only, any reindex request to a
* destination data stream must have an op_type of create. A reindex can
* only add new documents to a destination data stream. It cannot update
* existing documents in a destination data stream.
*
* By default, version conflicts abort the reindex process. To continue
* reindexing if there are conflicts, set the conflicts request body
* property to proceed. In this case, the response includes a count of the
* version conflicts that were encountered. Note that the handling of other
* error types is unaffected by the conflicts property. Additionally, if you
* opt to count version conflicts, the operation could attempt to reindex
* more documents from the source than max_docs until it has successfully
* indexed max_docs documents into the target or it has gone through every
* document in the source query.
*
* Refer to the linked documentation for examples of how to reindex documents.
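*
* A minimal usage sketch (assuming an ElasticsearchClient named client;
* source and destination index names are hypothetical):
*
* ReindexResponse resp = client.reindex(r -> r
*     .source(s -> s.index("my-index-000001"))
*     .dest(d -> d.index("my-new-index-000001"))
*     .conflicts(Conflicts.Proceed));   // continue past version conflicts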
*
* @see Documentation
* on elastic.co
*/
public ReindexResponse reindex(ReindexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ReindexRequest, ReindexResponse, ErrorResponse> endpoint = (JsonEndpoint<ReindexRequest, ReindexResponse, ErrorResponse>) ReindexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Reindex documents.
*
* Copy documents from a source to a destination. You can copy all documents to
* the destination index or reindex a subset of the documents. The source can be
* any existing index, alias, or data stream. The destination must differ from
* the source. For example, you cannot reindex a data stream into itself.
*
* IMPORTANT: Reindex requires _source to be enabled for all
* documents in the source. The destination should be configured as wanted
* before calling the reindex API. Reindex does not copy the settings from the
* source or its associated template. Mappings, shard counts, and replicas, for
* example, must be configured ahead of time.
*
* If the Elasticsearch security features are enabled, you must have the
* following security privileges:
*
* - The read index privilege for the source data stream, index, or alias.
* - The write index privilege for the destination data stream, index, or
* index alias.
* - To automatically create a data stream or index with a reindex API
* request, you must have the auto_configure, create_index, or manage index
* privilege for the destination data stream, index, or alias.
* - If reindexing from a remote cluster, the source.remote.user must have
* the monitor cluster privilege and the read index privilege for the source
* data stream, index, or alias.
*
*
* If reindexing from a remote cluster, you must explicitly allow the remote
* host in the reindex.remote.whitelist setting. Automatic data
* stream creation requires a matching index template with data stream enabled.
*
* The dest element can be configured like the index API to control
* optimistic concurrency control. Omitting version_type or setting it to
* internal causes Elasticsearch to blindly dump documents into the
* destination, overwriting any that happen to have the same ID.
*
* Setting version_type to external causes Elasticsearch to preserve the
* version from the source, create any documents that are missing, and
* update any documents that have an older version in the destination than
* they do in the source.
*
* Setting op_type to create causes the reindex API to create only missing
* documents in the destination. All existing documents will cause a version
* conflict.
*
* IMPORTANT: Because data streams are append-only, any reindex request to a
* destination data stream must have an op_type of create. A reindex can
* only add new documents to a destination data stream. It cannot update
* existing documents in a destination data stream.
*
* By default, version conflicts abort the reindex process. To continue
* reindexing if there are conflicts, set the conflicts request body
* property to proceed. In this case, the response includes a count of the
* version conflicts that were encountered. Note that the handling of other
* error types is unaffected by the conflicts property. Additionally, if you
* opt to count version conflicts, the operation could attempt to reindex
* more documents from the source than max_docs until it has successfully
* indexed max_docs documents into the target or it has gone through every
* document in the source query.
*
* Refer to the linked documentation for examples of how to reindex documents.
*
* @param fn
* a function that initializes a builder to create the
* {@link ReindexRequest}
* @see Documentation
* on elastic.co
*/
public final ReindexResponse reindex(Function<ReindexRequest.Builder, ObjectBuilder<ReindexRequest>> fn)
throws IOException, ElasticsearchException {
return reindex(fn.apply(new ReindexRequest.Builder()).build());
}
// ----- Endpoint: reindex_rethrottle
/**
* Throttle a reindex operation.
*
* Change the number of requests per second for a particular reindex operation.
* For example:
*
*
* POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
*
*
*
* Rethrottling that speeds up the query takes effect immediately. Rethrottling
* that slows down the query will take effect after completing the current
* batch. This behavior prevents scroll timeouts.
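*
* A minimal usage sketch (assuming an ElasticsearchClient named client; the
* task ID is hypothetical and would normally come from a reindex request
* submitted with wait_for_completion=false):
*
* client.reindexRethrottle(r -> r
*     .taskId("r1A2WoRbTwKZ516z6NEs5A:36619")
*     .requestsPerSecond(-1F));   // -1 disables throttling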
*
* @see Documentation
* on elastic.co
*/
public ReindexRethrottleResponse reindexRethrottle(ReindexRethrottleRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ReindexRethrottleRequest, ReindexRethrottleResponse, ErrorResponse> endpoint = (JsonEndpoint<ReindexRethrottleRequest, ReindexRethrottleResponse, ErrorResponse>) ReindexRethrottleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Throttle a reindex operation.
*
* Change the number of requests per second for a particular reindex operation.
* For example:
*
*
* POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
*
*
*
* Rethrottling that speeds up the query takes effect immediately. Rethrottling
* that slows down the query will take effect after completing the current
* batch. This behavior prevents scroll timeouts.
*
* @param fn
* a function that initializes a builder to create the
* {@link ReindexRethrottleRequest}
* @see Documentation
* on elastic.co
*/
public final ReindexRethrottleResponse reindexRethrottle(
Function<ReindexRethrottleRequest.Builder, ObjectBuilder<ReindexRethrottleRequest>> fn)
throws IOException, ElasticsearchException {
return reindexRethrottle(fn.apply(new ReindexRethrottleRequest.Builder()).build());
}
// ----- Endpoint: render_search_template
/**
* Render a search template.
*
* Render a search template as a search request body.
*
* @see Documentation
* on elastic.co
*/
public RenderSearchTemplateResponse renderSearchTemplate(RenderSearchTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<RenderSearchTemplateRequest, RenderSearchTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<RenderSearchTemplateRequest, RenderSearchTemplateResponse, ErrorResponse>) RenderSearchTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Render a search template.
*
* Render a search template as a search request body.
*
* @param fn
* a function that initializes a builder to create the
* {@link RenderSearchTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final RenderSearchTemplateResponse renderSearchTemplate(
Function<RenderSearchTemplateRequest.Builder, ObjectBuilder<RenderSearchTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return renderSearchTemplate(fn.apply(new RenderSearchTemplateRequest.Builder()).build());
}
/**
* Render a search template.
*
* Render a search template as a search request body.
*
* @see