
co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesAsyncClient Maven / Gradle / Ivy
Show all versions of org.apache.servicemix.bundles.elasticsearch-java
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package co.elastic.clients.elasticsearch.indices;
import co.elastic.clients.ApiClient;
import co.elastic.clients.elasticsearch._types.ErrorResponse;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.Endpoint;
import co.elastic.clients.transport.JsonEndpoint;
import co.elastic.clients.transport.Transport;
import co.elastic.clients.transport.TransportOptions;
import co.elastic.clients.transport.endpoints.BooleanResponse;
import co.elastic.clients.util.ObjectBuilder;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.annotation.Nullable;
//----------------------------------------------------------------
// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------
/**
* Client for the indices namespace.
*/
public class ElasticsearchIndicesAsyncClient
extends
ApiClient {
public ElasticsearchIndicesAsyncClient(ElasticsearchTransport transport) {
super(transport, null);
}
public ElasticsearchIndicesAsyncClient(ElasticsearchTransport transport,
@Nullable TransportOptions transportOptions) {
super(transport, transportOptions);
}
@Override
public ElasticsearchIndicesAsyncClient withTransportOptions(@Nullable TransportOptions transportOptions) {
return new ElasticsearchIndicesAsyncClient(this.transport, transportOptions);
}
// ----- Endpoint: indices.add_block
/**
* Add an index block.
*
* Add an index block to an index. Index blocks limit the operations allowed on
* an index by blocking specific operation types.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture addBlock(AddBlockRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) AddBlockRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Add an index block.
*
* Add an index block to an index. Index blocks limit the operations allowed on
* an index by blocking specific operation types.
*
* @param fn
* a function that initializes a builder to create the
* {@link AddBlockRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture addBlock(
Function> fn) {
return addBlock(fn.apply(new AddBlockRequest.Builder()).build());
}
// ----- Endpoint: indices.analyze
/**
* Get tokens from text analysis. The analyze API performs analysis on a text
* string and returns the resulting tokens.
*
* Generating excessive amount of tokens may cause a node to run out of memory.
* The index.analyze.max_token_count
setting enables you to limit
* the number of tokens that can be produced. If more than this limit of tokens
* gets generated, an error occurs. The _analyze
endpoint without a
* specified index will always use 10000
as its limit.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture analyze(AnalyzeRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) AnalyzeRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get tokens from text analysis. The analyze API performs analysis on a text
* string and returns the resulting tokens.
*
* Generating excessive amount of tokens may cause a node to run out of memory.
* The index.analyze.max_token_count
setting enables you to limit
* the number of tokens that can be produced. If more than this limit of tokens
* gets generated, an error occurs. The _analyze
endpoint without a
* specified index will always use 10000
as its limit.
*
* @param fn
* a function that initializes a builder to create the
* {@link AnalyzeRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture analyze(
Function> fn) {
return analyze(fn.apply(new AnalyzeRequest.Builder()).build());
}
/**
* Get tokens from text analysis. The analyze API performs analysis on a text
* string and returns the resulting tokens.
*
* Generating excessive amount of tokens may cause a node to run out of memory.
* The index.analyze.max_token_count
setting enables you to limit
* the number of tokens that can be produced. If more than this limit of tokens
* gets generated, an error occurs. The _analyze
endpoint without a
* specified index will always use 10000
as its limit.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture analyze() {
return this.transport.performRequestAsync(new AnalyzeRequest.Builder().build(), AnalyzeRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.cancel_migrate_reindex
/**
* Cancel a migration reindex operation.
*
* Cancel a migration reindex attempt for a data stream or index.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture cancelMigrateReindex(CancelMigrateReindexRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) CancelMigrateReindexRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Cancel a migration reindex operation.
*
* Cancel a migration reindex attempt for a data stream or index.
*
* @param fn
* a function that initializes a builder to create the
* {@link CancelMigrateReindexRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture cancelMigrateReindex(
Function> fn) {
return cancelMigrateReindex(fn.apply(new CancelMigrateReindexRequest.Builder()).build());
}
// ----- Endpoint: indices.clear_cache
/**
* Clear the cache. Clear the cache of one or more indices. For data streams,
* the API clears the caches of the stream's backing indices.
*
* By default, the clear cache API clears all caches. To clear only specific
* caches, use the fielddata
, query
, or
* request
parameters. To clear the cache only of specific fields,
* use the fields
parameter.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture clearCache(ClearCacheRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) ClearCacheRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Clear the cache. Clear the cache of one or more indices. For data streams,
* the API clears the caches of the stream's backing indices.
*
* By default, the clear cache API clears all caches. To clear only specific
* caches, use the fielddata
, query
, or
* request
parameters. To clear the cache only of specific fields,
* use the fields
parameter.
*
* @param fn
* a function that initializes a builder to create the
* {@link ClearCacheRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture clearCache(
Function> fn) {
return clearCache(fn.apply(new ClearCacheRequest.Builder()).build());
}
/**
* Clear the cache. Clear the cache of one or more indices. For data streams,
* the API clears the caches of the stream's backing indices.
*
* By default, the clear cache API clears all caches. To clear only specific
* caches, use the fielddata
, query
, or
* request
parameters. To clear the cache only of specific fields,
* use the fields
parameter.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture clearCache() {
return this.transport.performRequestAsync(new ClearCacheRequest.Builder().build(), ClearCacheRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.clone
/**
* Clone an index. Clone an existing index into a new index. Each original
* primary shard is cloned into a new primary shard in the new index.
*
* IMPORTANT: Elasticsearch does not apply index templates to the resulting
* index. The API also does not copy index metadata from the original index.
* Index metadata includes aliases, index lifecycle management phase
* definitions, and cross-cluster replication (CCR) follower information. For
* example, if you clone a CCR follower index, the resulting clone will not be a
* follower index.
*
* The clone API copies most index settings from the source index to the
* resulting index, with the exception of index.number_of_replicas
* and index.auto_expand_replicas
. To set the number of replicas in
* the resulting index, configure these settings in the clone request.
*
* Cloning works as follows:
*
* - First, it creates a new target index with the same definition as the
* source index.
* - Then it hard-links segments from the source index into the target index.
* If the file system does not support hard-linking, all segments are copied
* into the new index, which is a much more time consuming process.
* - Finally, it recovers the target index as though it were a closed index
* which had just been re-opened.
*
*
* IMPORTANT: Indices can only be cloned if they meet the following
* requirements:
*
* - The index must be marked as read-only and have a cluster health status of
* green.
* - The target index must not exist.
* - The source index must have the same number of primary shards as the
* target index.
* - The node handling the clone process must have sufficient free disk space
* to accommodate a second copy of the existing index.
*
*
* The current write index on a data stream cannot be cloned. In order to clone
* the current write index, the data stream must first be rolled over so that a
* new write index is created and then the previous write index can be cloned.
*
* NOTE: Mappings cannot be specified in the _clone
request. The
* mappings of the source index will be used for the target index.
*
* Monitor the cloning process
*
* The cloning process can be monitored with the cat recovery API or the cluster
* health API can be used to wait until all primary shards have been allocated
* by setting the wait_for_status
parameter to yellow
.
*
* The _clone
API returns as soon as the target index has been
* added to the cluster state, before any shards have been allocated. At this
* point, all shards are in the state unassigned. If, for any reason, the target
* index can't be allocated, its primary shard will remain unassigned until it
* can be allocated on that node.
*
* Once the primary shard is allocated, it moves to state initializing, and the
* clone process begins. When the clone operation completes, the shard will
* become active. At that point, Elasticsearch will try to allocate any replicas
* and may decide to relocate the primary shard to another node.
*
* Wait for active shards
*
* Because the clone operation creates a new index to clone the shards to, the
* wait for active shards setting on index creation applies to the clone index
* action as well.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture clone(CloneIndexRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) CloneIndexRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Clone an index. Clone an existing index into a new index. Each original
* primary shard is cloned into a new primary shard in the new index.
*
* IMPORTANT: Elasticsearch does not apply index templates to the resulting
* index. The API also does not copy index metadata from the original index.
* Index metadata includes aliases, index lifecycle management phase
* definitions, and cross-cluster replication (CCR) follower information. For
* example, if you clone a CCR follower index, the resulting clone will not be a
* follower index.
*
* The clone API copies most index settings from the source index to the
* resulting index, with the exception of index.number_of_replicas
* and index.auto_expand_replicas
. To set the number of replicas in
* the resulting index, configure these settings in the clone request.
*
* Cloning works as follows:
*
* - First, it creates a new target index with the same definition as the
* source index.
* - Then it hard-links segments from the source index into the target index.
* If the file system does not support hard-linking, all segments are copied
* into the new index, which is a much more time consuming process.
* - Finally, it recovers the target index as though it were a closed index
* which had just been re-opened.
*
*
* IMPORTANT: Indices can only be cloned if they meet the following
* requirements:
*
* - The index must be marked as read-only and have a cluster health status of
* green.
* - The target index must not exist.
* - The source index must have the same number of primary shards as the
* target index.
* - The node handling the clone process must have sufficient free disk space
* to accommodate a second copy of the existing index.
*
*
* The current write index on a data stream cannot be cloned. In order to clone
* the current write index, the data stream must first be rolled over so that a
* new write index is created and then the previous write index can be cloned.
*
* NOTE: Mappings cannot be specified in the _clone
request. The
* mappings of the source index will be used for the target index.
*
* Monitor the cloning process
*
* The cloning process can be monitored with the cat recovery API or the cluster
* health API can be used to wait until all primary shards have been allocated
* by setting the wait_for_status
parameter to yellow
.
*
* The _clone
API returns as soon as the target index has been
* added to the cluster state, before any shards have been allocated. At this
* point, all shards are in the state unassigned. If, for any reason, the target
* index can't be allocated, its primary shard will remain unassigned until it
* can be allocated on that node.
*
* Once the primary shard is allocated, it moves to state initializing, and the
* clone process begins. When the clone operation completes, the shard will
* become active. At that point, Elasticsearch will try to allocate any replicas
* and may decide to relocate the primary shard to another node.
*
* Wait for active shards
*
* Because the clone operation creates a new index to clone the shards to, the
* wait for active shards setting on index creation applies to the clone index
* action as well.
*
* @param fn
* a function that initializes a builder to create the
* {@link CloneIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture clone(
Function> fn) {
return clone(fn.apply(new CloneIndexRequest.Builder()).build());
}
// ----- Endpoint: indices.close
/**
* Close an index. A closed index is blocked for read or write operations and
* does not allow all operations that opened indices allow. It is not possible
* to index documents or to search for documents in a closed index. Closed
* indices do not have to maintain internal data structures for indexing or
* searching documents, which results in a smaller overhead on the cluster.
*
* When opening or closing an index, the master node is responsible for
* restarting the index shards to reflect the new state of the index. The shards
* will then go through the normal recovery process. The data of opened and
* closed indices is automatically replicated by the cluster to ensure that
* enough shard copies are safely kept around at all times.
*
* You can open and close multiple indices. An error is thrown if the request
* explicitly refers to a missing index. This behaviour can be turned off using
* the ignore_unavailable=true
parameter.
*
* By default, you must explicitly name the indices you are opening or closing.
* To open or close indices with _all
, *
, or other
* wildcard expressions, change
* the action.destructive_requires_name
setting to
* false
. This setting can also be changed with the cluster update
* settings API.
*
* Closed indices consume a significant amount of disk-space which can cause
* problems in managed environments. Closing indices can be turned off with the
* cluster settings API by setting cluster.indices.close.enable
to
* false
.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture close(CloseIndexRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) CloseIndexRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Close an index. A closed index is blocked for read or write operations and
* does not allow all operations that opened indices allow. It is not possible
* to index documents or to search for documents in a closed index. Closed
* indices do not have to maintain internal data structures for indexing or
* searching documents, which results in a smaller overhead on the cluster.
*
* When opening or closing an index, the master node is responsible for
* restarting the index shards to reflect the new state of the index. The shards
* will then go through the normal recovery process. The data of opened and
* closed indices is automatically replicated by the cluster to ensure that
* enough shard copies are safely kept around at all times.
*
* You can open and close multiple indices. An error is thrown if the request
* explicitly refers to a missing index. This behaviour can be turned off using
* the ignore_unavailable=true
parameter.
*
* By default, you must explicitly name the indices you are opening or closing.
* To open or close indices with _all
, *
, or other
* wildcard expressions, change
* the action.destructive_requires_name
setting to
* false
. This setting can also be changed with the cluster update
* settings API.
*
* Closed indices consume a significant amount of disk-space which can cause
* problems in managed environments. Closing indices can be turned off with the
* cluster settings API by setting cluster.indices.close.enable
to
* false
.
*
* @param fn
* a function that initializes a builder to create the
* {@link CloseIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture close(
Function> fn) {
return close(fn.apply(new CloseIndexRequest.Builder()).build());
}
// ----- Endpoint: indices.create
/**
* Create an index. You can use the create index API to add a new index to an
* Elasticsearch cluster. When creating an index, you can specify the following:
*
* - Settings for the index.
* - Mappings for fields in the index.
* - Index aliases
*
*
* Wait for active shards
*
* By default, index creation will only return a response to the client when the
* primary copies of each shard have been started, or the request times out. The
* index creation response will indicate what happened. For example,
* acknowledged
indicates whether the index was successfully
* created in the cluster, while shards_acknowledged
indicates
* whether the requisite number of shard copies were started for each shard in
* the index before timing out. Note that it is still possible for either
* acknowledged
or shards_acknowledged
to be
* false
, but for the index creation to be successful. These values
* simply indicate whether the operation completed before the timeout. If
* acknowledged
is false, the request timed out before the cluster
* state was updated with the newly created index, but it probably will be
* created sometime soon. If shards_acknowledged
is false, then the
* request timed out before the requisite number of shards were started (by
* default just the primaries), even if the cluster state was successfully
* updated to reflect the newly created index (that is to say,
* acknowledged
is true
).
*
* You can change the default of only waiting for the primary shards to start
* through the index setting index.write.wait_for_active_shards
.
* Note that changing this setting will also affect the
* wait_for_active_shards
value on all subsequent write operations.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture create(CreateIndexRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) CreateIndexRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Create an index. You can use the create index API to add a new index to an
* Elasticsearch cluster. When creating an index, you can specify the following:
*
* - Settings for the index.
* - Mappings for fields in the index.
* - Index aliases
*
*
* Wait for active shards
*
* By default, index creation will only return a response to the client when the
* primary copies of each shard have been started, or the request times out. The
* index creation response will indicate what happened. For example,
* acknowledged
indicates whether the index was successfully
* created in the cluster, while shards_acknowledged
indicates
* whether the requisite number of shard copies were started for each shard in
* the index before timing out. Note that it is still possible for either
* acknowledged
or shards_acknowledged
to be
* false
, but for the index creation to be successful. These values
* simply indicate whether the operation completed before the timeout. If
* acknowledged
is false, the request timed out before the cluster
* state was updated with the newly created index, but it probably will be
* created sometime soon. If shards_acknowledged
is false, then the
* request timed out before the requisite number of shards were started (by
* default just the primaries), even if the cluster state was successfully
* updated to reflect the newly created index (that is to say,
* acknowledged
is true
).
*
* You can change the default of only waiting for the primary shards to start
* through the index setting index.write.wait_for_active_shards
.
* Note that changing this setting will also affect the
* wait_for_active_shards
value on all subsequent write operations.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture create(
Function> fn) {
return create(fn.apply(new CreateIndexRequest.Builder()).build());
}
// ----- Endpoint: indices.create_data_stream
/**
* Create a data stream.
*
* You must have a matching index template with data stream enabled.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture createDataStream(CreateDataStreamRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) CreateDataStreamRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Create a data stream.
*
* You must have a matching index template with data stream enabled.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture createDataStream(
Function> fn) {
return createDataStream(fn.apply(new CreateDataStreamRequest.Builder()).build());
}
// ----- Endpoint: indices.create_from
/**
* Create an index from a source index.
*
* Copy the mappings and settings from the source index to a destination index
* while allowing request settings and mappings to override the source values.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture createFrom(CreateFromRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) CreateFromRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Create an index from a source index.
*
* Copy the mappings and settings from the source index to a destination index
* while allowing request settings and mappings to override the source values.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateFromRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture createFrom(
Function> fn) {
return createFrom(fn.apply(new CreateFromRequest.Builder()).build());
}
// ----- Endpoint: indices.data_streams_stats
/**
* Get data stream stats.
*
* Get statistics for one or more data streams.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture dataStreamsStats(DataStreamsStatsRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DataStreamsStatsRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get data stream stats.
*
* Get statistics for one or more data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link DataStreamsStatsRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture dataStreamsStats(
Function> fn) {
return dataStreamsStats(fn.apply(new DataStreamsStatsRequest.Builder()).build());
}
/**
* Get data stream stats.
*
* Get statistics for one or more data streams.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture dataStreamsStats() {
return this.transport.performRequestAsync(new DataStreamsStatsRequest.Builder().build(),
DataStreamsStatsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.delete
/**
* Delete indices. Deleting an index deletes its documents, shards, and
* metadata. It does not delete related Kibana components, such as data views,
* visualizations, or dashboards.
*
* You cannot delete the current write index of a data stream. To delete the
* index, you must roll over the data stream so a new write index is created.
* You can then use the delete index API to delete the previous write index.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture delete(DeleteIndexRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DeleteIndexRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete indices. Deleting an index deletes its documents, shards, and
* metadata. It does not delete related Kibana components, such as data views,
* visualizations, or dashboards.
*
* You cannot delete the current write index of a data stream. To delete the
* index, you must roll over the data stream so a new write index is created.
* You can then use the delete index API to delete the previous write index.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture delete(
Function> fn) {
return delete(fn.apply(new DeleteIndexRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_alias
/**
* Delete an alias. Removes a data stream or index from an alias.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture deleteAlias(DeleteAliasRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DeleteAliasRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete an alias. Removes a data stream or index from an alias.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteAliasRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture deleteAlias(
Function> fn) {
return deleteAlias(fn.apply(new DeleteAliasRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_data_lifecycle
/**
* Delete data stream lifecycles. Removes the data stream lifecycle from a data
* stream, rendering it not managed by the data stream lifecycle.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture deleteDataLifecycle(DeleteDataLifecycleRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DeleteDataLifecycleRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete data stream lifecycles. Removes the data stream lifecycle from a data
* stream, rendering it not managed by the data stream lifecycle.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture deleteDataLifecycle(
Function> fn) {
return deleteDataLifecycle(fn.apply(new DeleteDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_data_stream
/**
* Delete data streams. Deletes one or more data streams and their backing
* indices.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture deleteDataStream(DeleteDataStreamRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DeleteDataStreamRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete data streams. Deletes one or more data streams and their backing
* indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture deleteDataStream(
Function> fn) {
return deleteDataStream(fn.apply(new DeleteDataStreamRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_index_template
/**
* Delete an index template. The provided <index-template> may contain
* multiple template names separated by a comma. If multiple template names are
* specified then there is no wildcard support and the provided names should
* match completely with existing templates.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture deleteIndexTemplate(DeleteIndexTemplateRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DeleteIndexTemplateRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete an index template. The provided <index-template> may contain
* multiple template names separated by a comma. If multiple template names are
* specified then there is no wildcard support and the provided names should
* match completely with existing templates.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture deleteIndexTemplate(
Function> fn) {
return deleteIndexTemplate(fn.apply(new DeleteIndexTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_template
/**
* Delete a legacy index template. IMPORTANT: This documentation is about legacy
* index templates, which are deprecated and will be replaced by the composable
* templates introduced in Elasticsearch 7.8.
*
* @see Documentation
* on elastic.co
*/
public CompletableFuture deleteTemplate(DeleteTemplateRequest request) {
@SuppressWarnings("unchecked")
JsonEndpoint endpoint = (JsonEndpoint) DeleteTemplateRequest._ENDPOINT;
return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Delete a legacy index template. IMPORTANT: This documentation is about legacy
* index templates, which are deprecated and will be replaced by the composable
* templates introduced in Elasticsearch 7.8.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture deleteTemplate(
Function> fn) {
return deleteTemplate(fn.apply(new DeleteTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.disk_usage
/**
 * Analyze the index disk usage. Analyze the disk usage of each field of an
 * index or data stream. This API might not support indices created in previous
 * Elasticsearch versions. The result of a small index can be inaccurate as some
 * parts of an index might not be analyzed by the API.
 * <p>
 * NOTE: The total size of fields of the analyzed shards of the index in the
 * response is usually smaller than the index {@code store_size} value because
 * some small metadata files are ignored and some parts of data files might not
 * be scanned by the API. Since stored fields are stored together in a
 * compressed format, the sizes of stored fields are also estimates and can be
 * inaccurate. The stored size of the {@code _id} field is likely underestimated
 * while the {@code _source} field is overestimated.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<DiskUsageResponse> diskUsage(DiskUsageRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<DiskUsageRequest, DiskUsageResponse, ErrorResponse> endpoint = (JsonEndpoint<DiskUsageRequest, DiskUsageResponse, ErrorResponse>) DiskUsageRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Analyze the index disk usage. Analyze the disk usage of each field of an
* index or data stream. This API might not support indices created in previous
* Elasticsearch versions. The result of a small index can be inaccurate as some
* parts of an index might not be analyzed by the API.
*
* NOTE: The total size of fields of the analyzed shards of the index in the
* response is usually smaller than the index store_size
value
* because some small metadata files are ignored and some parts of data files
* might not be scanned by the API. Since stored fields are stored together in a
* compressed format, the sizes of stored fields are also estimates and can be
* inaccurate. The stored size of the _id
field is likely
* underestimated while the _source
field is overestimated.
*
* @param fn
* a function that initializes a builder to create the
* {@link DiskUsageRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture diskUsage(
Function> fn) {
return diskUsage(fn.apply(new DiskUsageRequest.Builder()).build());
}
// ----- Endpoint: indices.downsample
/**
 * Downsample an index. Aggregate a time series (TSDS) index and store
 * pre-computed statistical summaries ({@code min}, {@code max}, {@code sum},
 * {@code value_count} and {@code avg}) for each metric field grouped by a
 * configured time interval. For example, a TSDS index that contains metrics
 * sampled every 10 seconds can be downsampled to an hourly index. All documents
 * within an hour interval are summarized and stored as a single document in the
 * downsample index.
 * <p>
 * NOTE: Only indices in a time series data stream are supported. Neither field
 * nor document level security can be defined on the source index. The source
 * index must be read only ({@code index.blocks.write: true}).
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<DownsampleResponse> downsample(DownsampleRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<DownsampleRequest, DownsampleResponse, ErrorResponse> endpoint = (JsonEndpoint<DownsampleRequest, DownsampleResponse, ErrorResponse>) DownsampleRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Downsample an index. Aggregate a time series (TSDS) index and store
* pre-computed statistical summaries (min
, max
,
* sum
, value_count
and avg
) for each
* metric field grouped by a configured time interval. For example, a TSDS index
* that contains metrics sampled every 10 seconds can be downsampled to an
* hourly index. All documents within an hour interval are summarized and stored
* as a single document in the downsample index.
*
* NOTE: Only indices in a time series data stream are supported. Neither field
* nor document level security can be defined on the source index. The source
* index must be read only (index.blocks.write: true
).
*
* @param fn
* a function that initializes a builder to create the
* {@link DownsampleRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture downsample(
Function> fn) {
return downsample(fn.apply(new DownsampleRequest.Builder()).build());
}
// ----- Endpoint: indices.exists
/**
 * Check indices. Check if one or more indices, index aliases, or data streams
 * exist.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<BooleanResponse> exists(ExistsRequest request) {
	// HEAD-style exists endpoints have no JSON body, hence Endpoint/BooleanResponse
	// rather than JsonEndpoint.
	@SuppressWarnings("unchecked")
	Endpoint<ExistsRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsRequest, BooleanResponse, ErrorResponse>) ExistsRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Check indices. Check if one or more indices, index aliases, or data streams
* exist.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture exists(
Function> fn) {
return exists(fn.apply(new ExistsRequest.Builder()).build());
}
// ----- Endpoint: indices.exists_alias
/**
 * Check aliases.
 * <p>
 * Check if one or more data stream or index aliases exist.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<BooleanResponse> existsAlias(ExistsAliasRequest request) {
	// HEAD-style endpoint: boolean result, no JSON response body.
	@SuppressWarnings("unchecked")
	Endpoint<ExistsAliasRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsAliasRequest, BooleanResponse, ErrorResponse>) ExistsAliasRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Check aliases.
*
* Check if one or more data stream or index aliases exist.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsAliasRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture existsAlias(
Function> fn) {
return existsAlias(fn.apply(new ExistsAliasRequest.Builder()).build());
}
// ----- Endpoint: indices.exists_index_template
/**
 * Check index templates.
 * <p>
 * Check whether index templates exist.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<BooleanResponse> existsIndexTemplate(ExistsIndexTemplateRequest request) {
	// HEAD-style endpoint: boolean result, no JSON response body.
	@SuppressWarnings("unchecked")
	Endpoint<ExistsIndexTemplateRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsIndexTemplateRequest, BooleanResponse, ErrorResponse>) ExistsIndexTemplateRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Check index templates.
*
* Check whether index templates exist.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture existsIndexTemplate(
Function> fn) {
return existsIndexTemplate(fn.apply(new ExistsIndexTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.exists_template
/**
 * Check existence of index templates. Get information about whether index
 * templates exist. Index templates define settings, mappings, and aliases that
 * can be applied automatically to new indices.
 * <p>
 * IMPORTANT: This documentation is about legacy index templates, which are
 * deprecated and will be replaced by the composable templates introduced in
 * Elasticsearch 7.8.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<BooleanResponse> existsTemplate(ExistsTemplateRequest request) {
	// HEAD-style endpoint: boolean result, no JSON response body.
	@SuppressWarnings("unchecked")
	Endpoint<ExistsTemplateRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsTemplateRequest, BooleanResponse, ErrorResponse>) ExistsTemplateRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Check existence of index templates. Get information about whether index
* templates exist. Index templates define settings, mappings, and aliases that
* can be applied automatically to new indices.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture existsTemplate(
Function> fn) {
return existsTemplate(fn.apply(new ExistsTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.explain_data_lifecycle
/**
 * Get the status for a data stream lifecycle. Get information about an index or
 * data stream's current data stream lifecycle status, such as time since index
 * creation, time since rollover, the lifecycle configuration managing the
 * index, or any errors encountered during lifecycle execution.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<ExplainDataLifecycleResponse> explainDataLifecycle(ExplainDataLifecycleRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<ExplainDataLifecycleRequest, ExplainDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<ExplainDataLifecycleRequest, ExplainDataLifecycleResponse, ErrorResponse>) ExplainDataLifecycleRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get the status for a data stream lifecycle. Get information about an index or
* data stream's current data stream lifecycle status, such as time since index
* creation, time since rollover, the lifecycle configuration managing the
* index, or any errors encountered during lifecycle execution.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExplainDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture explainDataLifecycle(
Function> fn) {
return explainDataLifecycle(fn.apply(new ExplainDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.field_usage_stats
/**
 * Get field usage stats. Get field usage information for each shard and field
 * of an index. Field usage statistics are automatically captured when queries
 * are running on a cluster. A shard-level search request that accesses a given
 * field, even if multiple times during that request, is counted as a single
 * use.
 * <p>
 * The response body reports the per-shard usage count of the data structures
 * that back the fields in the index. A given request will increment each count
 * by a maximum value of 1, even if the request accesses the same field multiple
 * times.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<FieldUsageStatsResponse> fieldUsageStats(FieldUsageStatsRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<FieldUsageStatsRequest, FieldUsageStatsResponse, ErrorResponse> endpoint = (JsonEndpoint<FieldUsageStatsRequest, FieldUsageStatsResponse, ErrorResponse>) FieldUsageStatsRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get field usage stats. Get field usage information for each shard and field
* of an index. Field usage statistics are automatically captured when queries
* are running on a cluster. A shard-level search request that accesses a given
* field, even if multiple times during that request, is counted as a single
* use.
*
* The response body reports the per-shard usage count of the data structures
* that back the fields in the index. A given request will increment each count
* by a maximum value of 1, even if the request accesses the same field multiple
* times.
*
* @param fn
* a function that initializes a builder to create the
* {@link FieldUsageStatsRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture fieldUsageStats(
Function> fn) {
return fieldUsageStats(fn.apply(new FieldUsageStatsRequest.Builder()).build());
}
// ----- Endpoint: indices.flush
/**
 * Flush data streams or indices. Flushing a data stream or index is the process
 * of making sure that any data that is currently only stored in the transaction
 * log is also permanently stored in the Lucene index. When restarting,
 * Elasticsearch replays any unflushed operations from the transaction log into
 * the Lucene index to bring it back into the state that it was in before the
 * restart. Elasticsearch automatically triggers flushes as needed, using
 * heuristics that trade off the size of the unflushed transaction log against
 * the cost of performing each flush.
 * <p>
 * After each operation has been flushed it is permanently stored in the Lucene
 * index. This may mean that there is no need to maintain an additional copy of
 * it in the transaction log. The transaction log is made up of multiple files,
 * called generations, and Elasticsearch will delete any generation files when
 * they are no longer needed, freeing up disk space.
 * <p>
 * It is also possible to trigger a flush on one or more indices using the flush
 * API, although it is rare for users to need to call this API directly. If you
 * call the flush API after indexing some documents then a successful response
 * indicates that Elasticsearch has flushed all the documents that were indexed
 * before the flush API was called.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<FlushResponse> flush(FlushRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<FlushRequest, FlushResponse, ErrorResponse> endpoint = (JsonEndpoint<FlushRequest, FlushResponse, ErrorResponse>) FlushRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Flush data streams or indices. Flushing a data stream or index is the process
* of making sure that any data that is currently only stored in the transaction
* log is also permanently stored in the Lucene index. When restarting,
* Elasticsearch replays any unflushed operations from the transaction log into
* the Lucene index to bring it back into the state that it was in before the
* restart. Elasticsearch automatically triggers flushes as needed, using
* heuristics that trade off the size of the unflushed transaction log against
* the cost of performing each flush.
*
* After each operation has been flushed it is permanently stored in the Lucene
* index. This may mean that there is no need to maintain an additional copy of
* it in the transaction log. The transaction log is made up of multiple files,
* called generations, and Elasticsearch will delete any generation files when
* they are no longer needed, freeing up disk space.
*
* It is also possible to trigger a flush on one or more indices using the flush
* API, although it is rare for users to need to call this API directly. If you
* call the flush API after indexing some documents then a successful response
* indicates that Elasticsearch has flushed all the documents that were indexed
* before the flush API was called.
*
* @param fn
* a function that initializes a builder to create the
* {@link FlushRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture flush(
Function> fn) {
return flush(fn.apply(new FlushRequest.Builder()).build());
}
/**
 * Flush data streams or indices. Flushing a data stream or index is the process
 * of making sure that any data that is currently only stored in the transaction
 * log is also permanently stored in the Lucene index. When restarting,
 * Elasticsearch replays any unflushed operations from the transaction log into
 * the Lucene index to bring it back into the state that it was in before the
 * restart. Elasticsearch automatically triggers flushes as needed, using
 * heuristics that trade off the size of the unflushed transaction log against
 * the cost of performing each flush.
 * <p>
 * After each operation has been flushed it is permanently stored in the Lucene
 * index. This may mean that there is no need to maintain an additional copy of
 * it in the transaction log. The transaction log is made up of multiple files,
 * called generations, and Elasticsearch will delete any generation files when
 * they are no longer needed, freeing up disk space.
 * <p>
 * It is also possible to trigger a flush on one or more indices using the flush
 * API, although it is rare for users to need to call this API directly. If you
 * call the flush API after indexing some documents then a successful response
 * indicates that Elasticsearch has flushed all the documents that were indexed
 * before the flush API was called.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<FlushResponse> flush() {
	// Convenience overload: flush with all-default request parameters.
	return this.transport.performRequestAsync(new FlushRequest.Builder().build(), FlushRequest._ENDPOINT,
			this.transportOptions);
}
// ----- Endpoint: indices.forcemerge
/**
 * Force a merge. Perform the force merge operation on the shards of one or more
 * indices. For data streams, the API forces a merge on the shards of the
 * stream's backing indices.
 * <p>
 * Merging reduces the number of segments in each shard by merging some of them
 * together and also frees up the space used by deleted documents. Merging
 * normally happens automatically, but sometimes it is useful to trigger a merge
 * manually.
 * <p>
 * WARNING: We recommend force merging only a read-only index (meaning the index
 * is no longer receiving writes). When documents are updated or deleted, the
 * old version is not immediately removed but instead soft-deleted and marked
 * with a &quot;tombstone&quot;. These soft-deleted documents are automatically
 * cleaned up during regular segment merges. But force merge can cause very
 * large (greater than 5 GB) segments to be produced, which are not eligible for
 * regular merges. So the number of soft-deleted documents can then grow
 * rapidly, resulting in higher disk usage and worse search performance. If you
 * regularly force merge an index receiving writes, this can also make snapshots
 * more expensive, since the new documents can't be backed up incrementally.
 * <p>
 * <b>Blocks during a force merge</b>
 * <p>
 * Calls to this API block until the merge is complete (unless request contains
 * {@code wait_for_completion=false}). If the client connection is lost before
 * completion then the force merge process will continue in the background. Any
 * new requests to force merge the same indices will also block until the
 * ongoing force merge is complete.
 * <p>
 * <b>Running force merge asynchronously</b>
 * <p>
 * If the request contains {@code wait_for_completion=false}, Elasticsearch
 * performs some preflight checks, launches the request, and returns a task you
 * can use to get the status of the task. However, you can not cancel this task
 * as the force merge task is not cancelable. Elasticsearch creates a record of
 * this task as a document at {@code _tasks/<task_id>}. When you are done with a
 * task, you should delete the task document so Elasticsearch can reclaim the
 * space.
 * <p>
 * <b>Force merging multiple indices</b>
 * <p>
 * You can force merge multiple indices with a single request by targeting:
 * <ul>
 * <li>One or more data streams that contain multiple backing indices</li>
 * <li>Multiple indices</li>
 * <li>One or more aliases</li>
 * <li>All data streams and indices in a cluster</li>
 * </ul>
 * <p>
 * Each targeted shard is force-merged separately using the force_merge
 * threadpool. By default each node only has a single {@code force_merge} thread
 * which means that the shards on that node are force-merged one at a time. If
 * you expand the {@code force_merge} threadpool on a node then it will force
 * merge its shards in parallel
 * <p>
 * Force merge makes the storage for the shard being merged temporarily
 * increase, as it may require free space up to triple its size in case
 * {@code max_num_segments parameter} is set to {@code 1}, to rewrite all
 * segments into a new one.
 * <p>
 * <b>Data streams and time-based indices</b>
 * <p>
 * Force-merging is useful for managing a data stream's older backing indices
 * and other time-based indices, particularly after a rollover. In these cases,
 * each index only receives indexing traffic for a certain period of time. Once
 * an index receive no more writes, its shards can be force-merged to a single
 * segment. This can be a good idea because single-segment shards can sometimes
 * use simpler and more efficient data structures to perform searches. For
 * example:
 * 
 * <pre>
 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
 * </pre>
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<ForcemergeResponse> forcemerge(ForcemergeRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<ForcemergeRequest, ForcemergeResponse, ErrorResponse> endpoint = (JsonEndpoint<ForcemergeRequest, ForcemergeResponse, ErrorResponse>) ForcemergeRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Force a merge. Perform the force merge operation on the shards of one or more
* indices. For data streams, the API forces a merge on the shards of the
* stream's backing indices.
*
* Merging reduces the number of segments in each shard by merging some of them
* together and also frees up the space used by deleted documents. Merging
* normally happens automatically, but sometimes it is useful to trigger a merge
* manually.
*
* WARNING: We recommend force merging only a read-only index (meaning the index
* is no longer receiving writes). When documents are updated or deleted, the
* old version is not immediately removed but instead soft-deleted and marked
* with a "tombstone". These soft-deleted documents are automatically
* cleaned up during regular segment merges. But force merge can cause very
* large (greater than 5 GB) segments to be produced, which are not eligible for
* regular merges. So the number of soft-deleted documents can then grow
* rapidly, resulting in higher disk usage and worse search performance. If you
* regularly force merge an index receiving writes, this can also make snapshots
* more expensive, since the new documents can't be backed up incrementally.
*
* Blocks during a force merge
*
* Calls to this API block until the merge is complete (unless request contains
* wait_for_completion=false
). If the client connection is lost
* before completion then the force merge process will continue in the
* background. Any new requests to force merge the same indices will also block
* until the ongoing force merge is complete.
*
* Running force merge asynchronously
*
* If the request contains wait_for_completion=false
, Elasticsearch
* performs some preflight checks, launches the request, and returns a task you
* can use to get the status of the task. However, you can not cancel this task
* as the force merge task is not cancelable. Elasticsearch creates a record of
* this task as a document at _tasks/<task_id>
. When you are
* done with a task, you should delete the task document so Elasticsearch can
* reclaim the space.
*
* Force merging multiple indices
*
* You can force merge multiple indices with a single request by targeting:
*
* - One or more data streams that contain multiple backing indices
* - Multiple indices
* - One or more aliases
* - All data streams and indices in a cluster
*
*
* Each targeted shard is force-merged separately using the force_merge
* threadpool. By default each node only has a single force_merge
* thread which means that the shards on that node are force-merged one at a
* time. If you expand the force_merge
threadpool on a node then it
* will force merge its shards in parallel
*
* Force merge makes the storage for the shard being merged temporarily
* increase, as it may require free space up to triple its size in case
* max_num_segments parameter
is set to 1
, to rewrite
* all segments into a new one.
*
* Data streams and time-based indices
*
* Force-merging is useful for managing a data stream's older backing indices
* and other time-based indices, particularly after a rollover. In these cases,
* each index only receives indexing traffic for a certain period of time. Once
* an index receive no more writes, its shards can be force-merged to a single
* segment. This can be a good idea because single-segment shards can sometimes
* use simpler and more efficient data structures to perform searches. For
* example:
*
*
* POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
*
*
*
* @param fn
* a function that initializes a builder to create the
* {@link ForcemergeRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture forcemerge(
Function> fn) {
return forcemerge(fn.apply(new ForcemergeRequest.Builder()).build());
}
/**
 * Force a merge. Perform the force merge operation on the shards of one or more
 * indices. For data streams, the API forces a merge on the shards of the
 * stream's backing indices.
 * <p>
 * Merging reduces the number of segments in each shard by merging some of them
 * together and also frees up the space used by deleted documents. Merging
 * normally happens automatically, but sometimes it is useful to trigger a merge
 * manually.
 * <p>
 * WARNING: We recommend force merging only a read-only index (meaning the index
 * is no longer receiving writes). When documents are updated or deleted, the
 * old version is not immediately removed but instead soft-deleted and marked
 * with a &quot;tombstone&quot;. These soft-deleted documents are automatically
 * cleaned up during regular segment merges. But force merge can cause very
 * large (greater than 5 GB) segments to be produced, which are not eligible for
 * regular merges. So the number of soft-deleted documents can then grow
 * rapidly, resulting in higher disk usage and worse search performance. If you
 * regularly force merge an index receiving writes, this can also make snapshots
 * more expensive, since the new documents can't be backed up incrementally.
 * <p>
 * <b>Blocks during a force merge</b>
 * <p>
 * Calls to this API block until the merge is complete (unless request contains
 * {@code wait_for_completion=false}). If the client connection is lost before
 * completion then the force merge process will continue in the background. Any
 * new requests to force merge the same indices will also block until the
 * ongoing force merge is complete.
 * <p>
 * <b>Running force merge asynchronously</b>
 * <p>
 * If the request contains {@code wait_for_completion=false}, Elasticsearch
 * performs some preflight checks, launches the request, and returns a task you
 * can use to get the status of the task. However, you can not cancel this task
 * as the force merge task is not cancelable. Elasticsearch creates a record of
 * this task as a document at {@code _tasks/<task_id>}. When you are done with a
 * task, you should delete the task document so Elasticsearch can reclaim the
 * space.
 * <p>
 * <b>Force merging multiple indices</b>
 * <p>
 * You can force merge multiple indices with a single request by targeting:
 * <ul>
 * <li>One or more data streams that contain multiple backing indices</li>
 * <li>Multiple indices</li>
 * <li>One or more aliases</li>
 * <li>All data streams and indices in a cluster</li>
 * </ul>
 * <p>
 * Each targeted shard is force-merged separately using the force_merge
 * threadpool. By default each node only has a single {@code force_merge} thread
 * which means that the shards on that node are force-merged one at a time. If
 * you expand the {@code force_merge} threadpool on a node then it will force
 * merge its shards in parallel
 * <p>
 * Force merge makes the storage for the shard being merged temporarily
 * increase, as it may require free space up to triple its size in case
 * {@code max_num_segments parameter} is set to {@code 1}, to rewrite all
 * segments into a new one.
 * <p>
 * <b>Data streams and time-based indices</b>
 * <p>
 * Force-merging is useful for managing a data stream's older backing indices
 * and other time-based indices, particularly after a rollover. In these cases,
 * each index only receives indexing traffic for a certain period of time. Once
 * an index receive no more writes, its shards can be force-merged to a single
 * segment. This can be a good idea because single-segment shards can sometimes
 * use simpler and more efficient data structures to perform searches. For
 * example:
 * 
 * <pre>
 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
 * </pre>
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<ForcemergeResponse> forcemerge() {
	// Convenience overload: force merge with all-default request parameters.
	return this.transport.performRequestAsync(new ForcemergeRequest.Builder().build(), ForcemergeRequest._ENDPOINT,
			this.transportOptions);
}
// ----- Endpoint: indices.get
/**
 * Get index information. Get information about one or more indices. For data
 * streams, the API returns information about the stream’s backing indices.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetIndexResponse> get(GetIndexRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetIndexRequest, GetIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<GetIndexRequest, GetIndexResponse, ErrorResponse>) GetIndexRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get index information. Get information about one or more indices. For data
* streams, the API returns information about the stream’s backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture get(
Function> fn) {
return get(fn.apply(new GetIndexRequest.Builder()).build());
}
// ----- Endpoint: indices.get_alias
/**
 * Get aliases. Retrieves information for one or more data stream or index
 * aliases.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetAliasResponse> getAlias(GetAliasRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetAliasRequest, GetAliasResponse, ErrorResponse> endpoint = (JsonEndpoint<GetAliasRequest, GetAliasResponse, ErrorResponse>) GetAliasRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get aliases. Retrieves information for one or more data stream or index
* aliases.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetAliasRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getAlias(
Function> fn) {
return getAlias(fn.apply(new GetAliasRequest.Builder()).build());
}
/**
 * Get aliases. Retrieves information for one or more data stream or index
 * aliases.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetAliasResponse> getAlias() {
	// Convenience overload: fetch all aliases with all-default request parameters.
	return this.transport.performRequestAsync(new GetAliasRequest.Builder().build(), GetAliasRequest._ENDPOINT,
			this.transportOptions);
}
// ----- Endpoint: indices.get_data_lifecycle
/**
 * Get data stream lifecycles.
 * <p>
 * Get the data stream lifecycle configuration of one or more data streams.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetDataLifecycleResponse> getDataLifecycle(GetDataLifecycleRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetDataLifecycleRequest, GetDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<GetDataLifecycleRequest, GetDataLifecycleResponse, ErrorResponse>) GetDataLifecycleRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get data stream lifecycles.
*
* Get the data stream lifecycle configuration of one or more data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getDataLifecycle(
Function> fn) {
return getDataLifecycle(fn.apply(new GetDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.get_data_lifecycle_stats
/**
 * Get data stream lifecycle stats. Get statistics about the data streams that
 * are managed by a data stream lifecycle.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetDataLifecycleStatsResponse> getDataLifecycleStats() {
	// The request has no parameters, so the generated singleton instance is used.
	return this.transport.performRequestAsync(GetDataLifecycleStatsRequest._INSTANCE,
			GetDataLifecycleStatsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_data_stream
/**
 * Get data streams.
 * <p>
 * Get information about one or more data streams.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetDataStreamResponse> getDataStream(GetDataStreamRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetDataStreamRequest, GetDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<GetDataStreamRequest, GetDataStreamResponse, ErrorResponse>) GetDataStreamRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get data streams.
*
* Get information about one or more data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getDataStream(
Function> fn) {
return getDataStream(fn.apply(new GetDataStreamRequest.Builder()).build());
}
/**
 * Get data streams.
 * <p>
 * Get information about one or more data streams.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetDataStreamResponse> getDataStream() {
	// Convenience overload: fetch all data streams with all-default parameters.
	return this.transport.performRequestAsync(new GetDataStreamRequest.Builder().build(),
			GetDataStreamRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_field_mapping
/**
 * Get mapping definitions. Retrieves mapping definitions for one or more
 * fields. For data streams, the API retrieves field mappings for the stream’s
 * backing indices.
 * <p>
 * This API is useful if you don't need a complete mapping or if an index
 * mapping contains a large number of fields.
 * 
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-field-mapping">Documentation
 *      on elastic.co</a>
 */

public CompletableFuture<GetFieldMappingResponse> getFieldMapping(GetFieldMappingRequest request) {
	// Narrow the wildcard-typed generated endpoint to this API's concrete types.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetFieldMappingRequest, GetFieldMappingResponse, ErrorResponse> endpoint = (JsonEndpoint<GetFieldMappingRequest, GetFieldMappingResponse, ErrorResponse>) GetFieldMappingRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get mapping definitions. Retrieves mapping definitions for one or more
* fields. For data streams, the API retrieves field mappings for the stream’s
* backing indices.
*
* This API is useful if you don't need a complete mapping or if an index
* mapping contains a large number of fields.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetFieldMappingRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getFieldMapping(
Function> fn) {
return getFieldMapping(fn.apply(new GetFieldMappingRequest.Builder()).build());
}
// ----- Endpoint: indices.get_index_template

/**
 * Get index templates. Get information about one or more index templates.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetIndexTemplateResponse> getIndexTemplate(GetIndexTemplateRequest request) {
	// Safe cast: the generated endpoint is declared raw but is typed by construction.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetIndexTemplateRequest, GetIndexTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<GetIndexTemplateRequest, GetIndexTemplateResponse, ErrorResponse>) GetIndexTemplateRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get index templates. Get information about one or more index templates.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getIndexTemplate(
Function> fn) {
return getIndexTemplate(fn.apply(new GetIndexTemplateRequest.Builder()).build());
}
/**
 * Get index templates. Get information about one or more index templates.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetIndexTemplateResponse> getIndexTemplate() {
	// No-argument convenience overload: sends a request with all defaults.
	return this.transport.performRequestAsync(new GetIndexTemplateRequest.Builder().build(),
			GetIndexTemplateRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_mapping

/**
 * Get mapping definitions. For data streams, the API retrieves mappings for the
 * stream’s backing indices.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetMappingResponse> getMapping(GetMappingRequest request) {
	// Safe cast: the generated endpoint is declared raw but is typed by construction.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetMappingRequest, GetMappingResponse, ErrorResponse> endpoint = (JsonEndpoint<GetMappingRequest, GetMappingResponse, ErrorResponse>) GetMappingRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get mapping definitions. For data streams, the API retrieves mappings for the
* stream’s backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetMappingRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getMapping(
Function> fn) {
return getMapping(fn.apply(new GetMappingRequest.Builder()).build());
}
/**
 * Get mapping definitions. For data streams, the API retrieves mappings for the
 * stream’s backing indices.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetMappingResponse> getMapping() {
	// No-argument convenience overload: sends a request with all defaults.
	return this.transport.performRequestAsync(new GetMappingRequest.Builder().build(), GetMappingRequest._ENDPOINT,
			this.transportOptions);
}
// ----- Endpoint: indices.get_migrate_reindex_status

/**
 * Get the migration reindexing status.
 * <p>
 * Get the status of a migration reindex attempt for a data stream or index.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-migrate-reindex-status">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetMigrateReindexStatusResponse> getMigrateReindexStatus(
		GetMigrateReindexStatusRequest request) {
	// Safe cast: the generated endpoint is declared raw but is typed by construction.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetMigrateReindexStatusRequest, GetMigrateReindexStatusResponse, ErrorResponse> endpoint = (JsonEndpoint<GetMigrateReindexStatusRequest, GetMigrateReindexStatusResponse, ErrorResponse>) GetMigrateReindexStatusRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get the migration reindexing status.
*
* Get the status of a migration reindex attempt for a data stream or index.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetMigrateReindexStatusRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getMigrateReindexStatus(
Function> fn) {
return getMigrateReindexStatus(fn.apply(new GetMigrateReindexStatusRequest.Builder()).build());
}
// ----- Endpoint: indices.get_settings

/**
 * Get index settings. Get setting information for one or more indices. For data
 * streams, it returns setting information for the stream's backing indices.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetIndicesSettingsResponse> getSettings(GetIndicesSettingsRequest request) {
	// Safe cast: the generated endpoint is declared raw but is typed by construction.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetIndicesSettingsRequest, GetIndicesSettingsResponse, ErrorResponse> endpoint = (JsonEndpoint<GetIndicesSettingsRequest, GetIndicesSettingsResponse, ErrorResponse>) GetIndicesSettingsRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get index settings. Get setting information for one or more indices. For data
* streams, it returns setting information for the stream's backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetIndicesSettingsRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getSettings(
Function> fn) {
return getSettings(fn.apply(new GetIndicesSettingsRequest.Builder()).build());
}
/**
 * Get index settings. Get setting information for one or more indices. For data
 * streams, it returns setting information for the stream's backing indices.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetIndicesSettingsResponse> getSettings() {
	// No-argument convenience overload: sends a request with all defaults.
	return this.transport.performRequestAsync(new GetIndicesSettingsRequest.Builder().build(),
			GetIndicesSettingsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_template

/**
 * Get legacy index templates. Get information about one or more index
 * templates.
 * <p>
 * IMPORTANT: This documentation is about legacy index templates, which are
 * deprecated and will be replaced by the composable templates introduced in
 * Elasticsearch 7.8.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetTemplateResponse> getTemplate(GetTemplateRequest request) {
	// Safe cast: the generated endpoint is declared raw but is typed by construction.
	@SuppressWarnings("unchecked")
	JsonEndpoint<GetTemplateRequest, GetTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<GetTemplateRequest, GetTemplateResponse, ErrorResponse>) GetTemplateRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Get legacy index templates. Get information about one or more index
* templates.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final CompletableFuture getTemplate(
Function> fn) {
return getTemplate(fn.apply(new GetTemplateRequest.Builder()).build());
}
/**
 * Get legacy index templates. Get information about one or more index
 * templates.
 * <p>
 * IMPORTANT: This documentation is about legacy index templates, which are
 * deprecated and will be replaced by the composable templates introduced in
 * Elasticsearch 7.8.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<GetTemplateResponse> getTemplate() {
	// No-argument convenience overload: sends a request with all defaults.
	return this.transport.performRequestAsync(new GetTemplateRequest.Builder().build(),
			GetTemplateRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.migrate_reindex

/**
 * Reindex legacy backing indices.
 * <p>
 * Reindex all legacy backing indices for a data stream. This operation occurs
 * in a persistent task. The persistent task ID is returned immediately and the
 * reindexing work is completed in that task.
 *
 * @see <a href=
 *      "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex">Documentation
 *      on elastic.co</a>
 */
public CompletableFuture<MigrateReindexResponse> migrateReindex(MigrateReindexRequest request) {
	// Safe cast: the generated endpoint is declared raw but is typed by construction.
	@SuppressWarnings("unchecked")
	JsonEndpoint<MigrateReindexRequest, MigrateReindexResponse, ErrorResponse> endpoint = (JsonEndpoint<MigrateReindexRequest, MigrateReindexResponse, ErrorResponse>) MigrateReindexRequest._ENDPOINT;

	return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
}
/**
* Reindex legacy backing indices.
*
* Reindex all legacy backing indices for a data stream. This operation occurs
* in a persistent task. The persistent task ID is returned immediately and the
* reindexing work is completed in that task.
*
* @param fn
* a function that initializes a builder to create the
* {@link MigrateReindexRequest}
* @see