co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesAsyncClient (Maven / Gradle / Ivy)
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package co.elastic.clients.elasticsearch.indices;

import co.elastic.clients.ApiClient;
import co.elastic.clients.elasticsearch._types.ErrorResponse;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.Endpoint;
import co.elastic.clients.transport.JsonEndpoint;
import co.elastic.clients.transport.Transport;
import co.elastic.clients.transport.TransportOptions;
import co.elastic.clients.transport.endpoints.BooleanResponse;
import co.elastic.clients.util.ObjectBuilder;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.annotation.Nullable;

//----------------------------------------------------------------
//       THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------

/**
 * Client for the indices namespace.
 */
public class ElasticsearchIndicesAsyncClient
		extends
			ApiClient<ElasticsearchTransport, ElasticsearchIndicesAsyncClient> {

	public ElasticsearchIndicesAsyncClient(ElasticsearchTransport transport) {
		super(transport, null);
	}

	public ElasticsearchIndicesAsyncClient(ElasticsearchTransport transport,
			@Nullable TransportOptions transportOptions) {
		super(transport, transportOptions);
	}

	@Override
	public ElasticsearchIndicesAsyncClient withTransportOptions(@Nullable TransportOptions transportOptions) {
		return new ElasticsearchIndicesAsyncClient(this.transport, transportOptions);
	}

	// ----- Endpoint: indices.add_block

	/**
	 * Add an index block.
	 * <p>
	 * Add an index block to an index. Index blocks limit the operations allowed on an index by blocking
	 * specific operation types.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<AddBlockResponse> addBlock(AddBlockRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<AddBlockRequest, AddBlockResponse, ErrorResponse> endpoint = (JsonEndpoint<AddBlockRequest, AddBlockResponse, ErrorResponse>) AddBlockRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Add an index block.
	 * <p>
	 * Add an index block to an index. Index blocks limit the operations allowed on an index by blocking
	 * specific operation types.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link AddBlockRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<AddBlockResponse> addBlock(
			Function<AddBlockRequest.Builder, ObjectBuilder<AddBlockRequest>> fn) {
		return addBlock(fn.apply(new AddBlockRequest.Builder()).build());
	}
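	// Editorial usage sketch (not part of the generated file): a minimal, hedged example of adding a
	// write block through the fluent builder. The index name is illustrative, and the package and name
	// of the IndicesBlockOptions enum are assumed from the client's usual layout.
	private static CompletableFuture<AddBlockResponse> addBlockExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.addBlock(b -> b
				.index("my-index") // illustrative index name
				.block(co.elastic.clients.elasticsearch.indices.add_block.IndicesBlockOptions.Write));
	}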

	// ----- Endpoint: indices.analyze

	/**
	 * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the
	 * resulting tokens.
	 * <p>
	 * Generating an excessive amount of tokens may cause a node to run out of memory. The
	 * <code>index.analyze.max_token_count</code> setting enables you to limit the number of tokens that can
	 * be produced. If more than this limit of tokens gets generated, an error occurs. The
	 * <code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its
	 * limit.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<AnalyzeResponse> analyze(AnalyzeRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<AnalyzeRequest, AnalyzeResponse, ErrorResponse> endpoint = (JsonEndpoint<AnalyzeRequest, AnalyzeResponse, ErrorResponse>) AnalyzeRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the
	 * resulting tokens.
	 * <p>
	 * Generating an excessive amount of tokens may cause a node to run out of memory. The
	 * <code>index.analyze.max_token_count</code> setting enables you to limit the number of tokens that can
	 * be produced. If more than this limit of tokens gets generated, an error occurs. The
	 * <code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its
	 * limit.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link AnalyzeRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<AnalyzeResponse> analyze(
			Function<AnalyzeRequest.Builder, ObjectBuilder<AnalyzeRequest>> fn) {
		return analyze(fn.apply(new AnalyzeRequest.Builder()).build());
	}

	/**
	 * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the
	 * resulting tokens.
	 * <p>
	 * Generating an excessive amount of tokens may cause a node to run out of memory. The
	 * <code>index.analyze.max_token_count</code> setting enables you to limit the number of tokens that can
	 * be produced. If more than this limit of tokens gets generated, an error occurs. The
	 * <code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its
	 * limit.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<AnalyzeResponse> analyze() {
		return this.transport.performRequestAsync(new AnalyzeRequest.Builder().build(), AnalyzeRequest._ENDPOINT,
				this.transportOptions);
	}
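	// Editorial usage sketch (not part of the generated file): analyze a text string with the standard
	// analyzer and print the resulting tokens. The analyzer and text values are illustrative.
	private static void analyzeExample(ElasticsearchIndicesAsyncClient indices) {
		indices.analyze(a -> a
				.analyzer("standard")
				.text("The quick brown fox"))
				.thenAccept(resp -> resp.tokens()
						.forEach(t -> System.out.println(t.token())));
	}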

	// ----- Endpoint: indices.cancel_migrate_reindex

	/**
	 * Cancel a migration reindex operation.
	 * <p>
	 * Cancel a migration reindex attempt for a data stream or index.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CancelMigrateReindexResponse> cancelMigrateReindex(CancelMigrateReindexRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CancelMigrateReindexRequest, CancelMigrateReindexResponse, ErrorResponse> endpoint = (JsonEndpoint<CancelMigrateReindexRequest, CancelMigrateReindexResponse, ErrorResponse>) CancelMigrateReindexRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Cancel a migration reindex operation.
	 * <p>
	 * Cancel a migration reindex attempt for a data stream or index.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link CancelMigrateReindexRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CancelMigrateReindexResponse> cancelMigrateReindex(
			Function<CancelMigrateReindexRequest.Builder, ObjectBuilder<CancelMigrateReindexRequest>> fn) {
		return cancelMigrateReindex(fn.apply(new CancelMigrateReindexRequest.Builder()).build());
	}

	// ----- Endpoint: indices.clear_cache

	/**
	 * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches
	 * of the stream's backing indices.
	 * <p>
	 * By default, the clear cache API clears all caches. To clear only specific caches, use the
	 * <code>fielddata</code>, <code>query</code>, or <code>request</code> parameters. To clear the cache
	 * only of specific fields, use the <code>fields</code> parameter.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ClearCacheResponse> clearCache(ClearCacheRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<ClearCacheRequest, ClearCacheResponse, ErrorResponse> endpoint = (JsonEndpoint<ClearCacheRequest, ClearCacheResponse, ErrorResponse>) ClearCacheRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches
	 * of the stream's backing indices.
	 * <p>
	 * By default, the clear cache API clears all caches. To clear only specific caches, use the
	 * <code>fielddata</code>, <code>query</code>, or <code>request</code> parameters. To clear the cache
	 * only of specific fields, use the <code>fields</code> parameter.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ClearCacheRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<ClearCacheResponse> clearCache(
			Function<ClearCacheRequest.Builder, ObjectBuilder<ClearCacheRequest>> fn) {
		return clearCache(fn.apply(new ClearCacheRequest.Builder()).build());
	}

	/**
	 * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches
	 * of the stream's backing indices.
	 * <p>
	 * By default, the clear cache API clears all caches. To clear only specific caches, use the
	 * <code>fielddata</code>, <code>query</code>, or <code>request</code> parameters. To clear the cache
	 * only of specific fields, use the <code>fields</code> parameter.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ClearCacheResponse> clearCache() {
		return this.transport.performRequestAsync(new ClearCacheRequest.Builder().build(),
				ClearCacheRequest._ENDPOINT, this.transportOptions);
	}
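	// Editorial usage sketch (not part of the generated file): clear only the query cache of one index,
	// leaving the fielddata and request caches untouched. The index name is illustrative.
	private static CompletableFuture<ClearCacheResponse> clearCacheExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.clearCache(c -> c
				.index("my-index")
				.query(true)); // clear only the query cache
	}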

	// ----- Endpoint: indices.clone

	/**
	 * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into
	 * a new primary shard in the new index.
	 * <p>
	 * IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not
	 * copy index metadata from the original index. Index metadata includes aliases, index lifecycle
	 * management phase definitions, and cross-cluster replication (CCR) follower information. For example,
	 * if you clone a CCR follower index, the resulting clone will not be a follower index.
	 * <p>
	 * The clone API copies most index settings from the source index to the resulting index, with the
	 * exception of <code>index.number_of_replicas</code> and <code>index.auto_expand_replicas</code>. To
	 * set the number of replicas in the resulting index, configure these settings in the clone request.
	 * <p>
	 * Cloning works as follows:
	 * <ul>
	 * <li>First, it creates a new target index with the same definition as the source index.</li>
	 * <li>Then it hard-links segments from the source index into the target index. If the file system does
	 * not support hard-linking, all segments are copied into the new index, which is a much more time
	 * consuming process.</li>
	 * <li>Finally, it recovers the target index as though it were a closed index which had just been
	 * re-opened.</li>
	 * </ul>
	 * <p>
	 * IMPORTANT: Indices can only be cloned if they meet the following requirements:
	 * <ul>
	 * <li>The index must be marked as read-only and have a cluster health status of green.</li>
	 * <li>The target index must not exist.</li>
	 * <li>The source index must have the same number of primary shards as the target index.</li>
	 * <li>The node handling the clone process must have sufficient free disk space to accommodate a second
	 * copy of the existing index.</li>
	 * </ul>
	 * <p>
	 * The current write index on a data stream cannot be cloned. In order to clone the current write index,
	 * the data stream must first be rolled over so that a new write index is created and then the previous
	 * write index can be cloned.
	 * <p>
	 * NOTE: Mappings cannot be specified in the <code>_clone</code> request. The mappings of the source
	 * index will be used for the target index.
	 * <p>
	 * Monitor the cloning process
	 * <p>
	 * The cloning process can be monitored with the cat recovery API, or the cluster health API can be used
	 * to wait until all primary shards have been allocated by setting the <code>wait_for_status</code>
	 * parameter to <code>yellow</code>.
	 * <p>
	 * The <code>_clone</code> API returns as soon as the target index has been added to the cluster state,
	 * before any shards have been allocated. At this point, all shards are in the state unassigned. If, for
	 * any reason, the target index can't be allocated, its primary shard will remain unassigned until it
	 * can be allocated on that node.
	 * <p>
	 * Once the primary shard is allocated, it moves to state initializing, and the clone process begins.
	 * When the clone operation completes, the shard will become active. At that point, Elasticsearch will
	 * try to allocate any replicas and may decide to relocate the primary shard to another node.
	 * <p>
	 * Wait for active shards
	 * <p>
	 * Because the clone operation creates a new index to clone the shards to, the wait for active shards
	 * setting on index creation applies to the clone index action as well.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CloneIndexResponse> clone(CloneIndexRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CloneIndexRequest, CloneIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<CloneIndexRequest, CloneIndexResponse, ErrorResponse>) CloneIndexRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into
	 * a new primary shard in the new index.
	 * <p>
	 * IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not
	 * copy index metadata from the original index. Index metadata includes aliases, index lifecycle
	 * management phase definitions, and cross-cluster replication (CCR) follower information. For example,
	 * if you clone a CCR follower index, the resulting clone will not be a follower index.
	 * <p>
	 * The clone API copies most index settings from the source index to the resulting index, with the
	 * exception of <code>index.number_of_replicas</code> and <code>index.auto_expand_replicas</code>. To
	 * set the number of replicas in the resulting index, configure these settings in the clone request.
	 * <p>
	 * Cloning works as follows:
	 * <ul>
	 * <li>First, it creates a new target index with the same definition as the source index.</li>
	 * <li>Then it hard-links segments from the source index into the target index. If the file system does
	 * not support hard-linking, all segments are copied into the new index, which is a much more time
	 * consuming process.</li>
	 * <li>Finally, it recovers the target index as though it were a closed index which had just been
	 * re-opened.</li>
	 * </ul>
	 * <p>
	 * IMPORTANT: Indices can only be cloned if they meet the following requirements:
	 * <ul>
	 * <li>The index must be marked as read-only and have a cluster health status of green.</li>
	 * <li>The target index must not exist.</li>
	 * <li>The source index must have the same number of primary shards as the target index.</li>
	 * <li>The node handling the clone process must have sufficient free disk space to accommodate a second
	 * copy of the existing index.</li>
	 * </ul>
	 * <p>
	 * The current write index on a data stream cannot be cloned. In order to clone the current write index,
	 * the data stream must first be rolled over so that a new write index is created and then the previous
	 * write index can be cloned.
	 * <p>
	 * NOTE: Mappings cannot be specified in the <code>_clone</code> request. The mappings of the source
	 * index will be used for the target index.
	 * <p>
	 * Monitor the cloning process
	 * <p>
	 * The cloning process can be monitored with the cat recovery API, or the cluster health API can be used
	 * to wait until all primary shards have been allocated by setting the <code>wait_for_status</code>
	 * parameter to <code>yellow</code>.
	 * <p>
	 * The <code>_clone</code> API returns as soon as the target index has been added to the cluster state,
	 * before any shards have been allocated. At this point, all shards are in the state unassigned. If, for
	 * any reason, the target index can't be allocated, its primary shard will remain unassigned until it
	 * can be allocated on that node.
	 * <p>
	 * Once the primary shard is allocated, it moves to state initializing, and the clone process begins.
	 * When the clone operation completes, the shard will become active. At that point, Elasticsearch will
	 * try to allocate any replicas and may decide to relocate the primary shard to another node.
	 * <p>
	 * Wait for active shards
	 * <p>
	 * Because the clone operation creates a new index to clone the shards to, the wait for active shards
	 * setting on index creation applies to the clone index action as well.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link CloneIndexRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CloneIndexResponse> clone(
			Function<CloneIndexRequest.Builder, ObjectBuilder<CloneIndexRequest>> fn) {
		return clone(fn.apply(new CloneIndexRequest.Builder()).build());
	}
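	// Editorial usage sketch (not part of the generated file): clone a read-only source index into a new
	// target index. Index names are illustrative; the source must already be marked read-only and have a
	// green health status, as described above.
	private static CompletableFuture<CloneIndexResponse> cloneExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.clone(c -> c
				.index("my-source-index")
				.target("my-target-index"));
	}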

	// ----- Endpoint: indices.close

	/**
	 * Close an index. A closed index is blocked for read or write operations and does not allow all
	 * operations that opened indices allow. It is not possible to index documents or to search for
	 * documents in a closed index. Closed indices do not have to maintain internal data structures for
	 * indexing or searching documents, which results in a smaller overhead on the cluster.
	 * <p>
	 * When opening or closing an index, the master node is responsible for restarting the index shards to
	 * reflect the new state of the index. The shards will then go through the normal recovery process. The
	 * data of opened and closed indices is automatically replicated by the cluster to ensure that enough
	 * shard copies are safely kept around at all times.
	 * <p>
	 * You can open and close multiple indices. An error is thrown if the request explicitly refers to a
	 * missing index. This behaviour can be turned off using the <code>ignore_unavailable=true</code>
	 * parameter.
	 * <p>
	 * By default, you must explicitly name the indices you are opening or closing. To open or close indices
	 * with <code>_all</code>, <code>*</code>, or other wildcard expressions, change the
	 * <code>action.destructive_requires_name</code> setting to <code>false</code>. This setting can also be
	 * changed with the cluster update settings API.
	 * <p>
	 * Closed indices consume a significant amount of disk-space which can cause problems in managed
	 * environments. Closing indices can be turned off with the cluster settings API by setting
	 * <code>cluster.indices.close.enable</code> to <code>false</code>.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CloseIndexResponse> close(CloseIndexRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CloseIndexRequest, CloseIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<CloseIndexRequest, CloseIndexResponse, ErrorResponse>) CloseIndexRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Close an index. A closed index is blocked for read or write operations and does not allow all
	 * operations that opened indices allow. It is not possible to index documents or to search for
	 * documents in a closed index. Closed indices do not have to maintain internal data structures for
	 * indexing or searching documents, which results in a smaller overhead on the cluster.
	 * <p>
	 * When opening or closing an index, the master node is responsible for restarting the index shards to
	 * reflect the new state of the index. The shards will then go through the normal recovery process. The
	 * data of opened and closed indices is automatically replicated by the cluster to ensure that enough
	 * shard copies are safely kept around at all times.
	 * <p>
	 * You can open and close multiple indices. An error is thrown if the request explicitly refers to a
	 * missing index. This behaviour can be turned off using the <code>ignore_unavailable=true</code>
	 * parameter.
	 * <p>
	 * By default, you must explicitly name the indices you are opening or closing. To open or close indices
	 * with <code>_all</code>, <code>*</code>, or other wildcard expressions, change the
	 * <code>action.destructive_requires_name</code> setting to <code>false</code>. This setting can also be
	 * changed with the cluster update settings API.
	 * <p>
	 * Closed indices consume a significant amount of disk-space which can cause problems in managed
	 * environments. Closing indices can be turned off with the cluster settings API by setting
	 * <code>cluster.indices.close.enable</code> to <code>false</code>.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link CloseIndexRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CloseIndexResponse> close(
			Function<CloseIndexRequest.Builder, ObjectBuilder<CloseIndexRequest>> fn) {
		return close(fn.apply(new CloseIndexRequest.Builder()).build());
	}
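	// Editorial usage sketch (not part of the generated file): close one index by name. The index name is
	// illustrative.
	private static CompletableFuture<CloseIndexResponse> closeExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.close(c -> c.index("my-index"));
	}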

	// ----- Endpoint: indices.create

	/**
	 * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster.
	 * When creating an index, you can specify the following:
	 * <ul>
	 * <li>Settings for the index.</li>
	 * <li>Mappings for fields in the index.</li>
	 * <li>Index aliases.</li>
	 * </ul>
	 * <p>
	 * Wait for active shards
	 * <p>
	 * By default, index creation will only return a response to the client when the primary copies of each
	 * shard have been started, or the request times out. The index creation response will indicate what
	 * happened. For example, <code>acknowledged</code> indicates whether the index was successfully created
	 * in the cluster, while <code>shards_acknowledged</code> indicates whether the requisite number of
	 * shard copies were started for each shard in the index before timing out. Note that it is still
	 * possible for either <code>acknowledged</code> or <code>shards_acknowledged</code> to be
	 * <code>false</code>, but for the index creation to be successful. These values simply indicate whether
	 * the operation completed before the timeout. If <code>acknowledged</code> is false, the request timed
	 * out before the cluster state was updated with the newly created index, but it probably will be
	 * created sometime soon. If <code>shards_acknowledged</code> is false, then the request timed out
	 * before the requisite number of shards were started (by default just the primaries), even if the
	 * cluster state was successfully updated to reflect the newly created index (that is to say,
	 * <code>acknowledged</code> is <code>true</code>).
	 * <p>
	 * You can change the default of only waiting for the primary shards to start through the index setting
	 * <code>index.write.wait_for_active_shards</code>. Note that changing this setting will also affect the
	 * <code>wait_for_active_shards</code> value on all subsequent write operations.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CreateIndexResponse> create(CreateIndexRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CreateIndexRequest, CreateIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateIndexRequest, CreateIndexResponse, ErrorResponse>) CreateIndexRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster.
	 * When creating an index, you can specify the following:
	 * <ul>
	 * <li>Settings for the index.</li>
	 * <li>Mappings for fields in the index.</li>
	 * <li>Index aliases.</li>
	 * </ul>
	 * <p>
	 * Wait for active shards
	 * <p>
	 * By default, index creation will only return a response to the client when the primary copies of each
	 * shard have been started, or the request times out. The index creation response will indicate what
	 * happened. For example, <code>acknowledged</code> indicates whether the index was successfully created
	 * in the cluster, while <code>shards_acknowledged</code> indicates whether the requisite number of
	 * shard copies were started for each shard in the index before timing out. Note that it is still
	 * possible for either <code>acknowledged</code> or <code>shards_acknowledged</code> to be
	 * <code>false</code>, but for the index creation to be successful. These values simply indicate whether
	 * the operation completed before the timeout. If <code>acknowledged</code> is false, the request timed
	 * out before the cluster state was updated with the newly created index, but it probably will be
	 * created sometime soon. If <code>shards_acknowledged</code> is false, then the request timed out
	 * before the requisite number of shards were started (by default just the primaries), even if the
	 * cluster state was successfully updated to reflect the newly created index (that is to say,
	 * <code>acknowledged</code> is <code>true</code>).
	 * <p>
	 * You can change the default of only waiting for the primary shards to start through the index setting
	 * <code>index.write.wait_for_active_shards</code>. Note that changing this setting will also affect the
	 * <code>wait_for_active_shards</code> value on all subsequent write operations.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link CreateIndexRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CreateIndexResponse> create(
			Function<CreateIndexRequest.Builder, ObjectBuilder<CreateIndexRequest>> fn) {
		return create(fn.apply(new CreateIndexRequest.Builder()).build());
	}
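	// Editorial usage sketch (not part of the generated file): create an index with a simple text mapping.
	// The index and field names are illustrative.
	private static CompletableFuture<CreateIndexResponse> createExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.create(c -> c
				.index("my-index")
				.mappings(m -> m
						.properties("title", p -> p.text(t -> t)))); // a single text field
	}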

	// ----- Endpoint: indices.create_data_stream

	/**
	 * Create a data stream.
	 * <p>
	 * You must have a matching index template with data stream enabled.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CreateDataStreamResponse> createDataStream(CreateDataStreamRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CreateDataStreamRequest, CreateDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateDataStreamRequest, CreateDataStreamResponse, ErrorResponse>) CreateDataStreamRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Create a data stream.
	 * <p>
	 * You must have a matching index template with data stream enabled.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link CreateDataStreamRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CreateDataStreamResponse> createDataStream(
			Function<CreateDataStreamRequest.Builder, ObjectBuilder<CreateDataStreamRequest>> fn) {
		return createDataStream(fn.apply(new CreateDataStreamRequest.Builder()).build());
	}
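	// Editorial usage sketch (not part of the generated file): create a data stream by name. A matching
	// index template with data stream enabled must already exist; the stream name is illustrative.
	private static CompletableFuture<CreateDataStreamResponse> createDataStreamExample(
			ElasticsearchIndicesAsyncClient indices) {
		return indices.createDataStream(c -> c.name("logs-myapp-default"));
	}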

	// ----- Endpoint: indices.create_from

	/**
	 * Create an index from a source index.
	 * <p>
	 * Copy the mappings and settings from the source index to a destination index while allowing request
	 * settings and mappings to override the source values.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<CreateFromResponse> createFrom(CreateFromRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<CreateFromRequest, CreateFromResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateFromRequest, CreateFromResponse, ErrorResponse>) CreateFromRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Create an index from a source index.
	 * <p>
	 * Copy the mappings and settings from the source index to a destination index while allowing request
	 * settings and mappings to override the source values.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link CreateFromRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<CreateFromResponse> createFrom(
			Function<CreateFromRequest.Builder, ObjectBuilder<CreateFromRequest>> fn) {
		return createFrom(fn.apply(new CreateFromRequest.Builder()).build());
	}

	// ----- Endpoint: indices.data_streams_stats

	/**
	 * Get data stream stats.
	 * <p>
	 * Get statistics for one or more data streams.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DataStreamsStatsResponse> dataStreamsStats(DataStreamsStatsRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DataStreamsStatsRequest, DataStreamsStatsResponse, ErrorResponse> endpoint = (JsonEndpoint<DataStreamsStatsRequest, DataStreamsStatsResponse, ErrorResponse>) DataStreamsStatsRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Get data stream stats.
	 * <p>
	 * Get statistics for one or more data streams.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DataStreamsStatsRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DataStreamsStatsResponse> dataStreamsStats(
			Function<DataStreamsStatsRequest.Builder, ObjectBuilder<DataStreamsStatsRequest>> fn) {
		return dataStreamsStats(fn.apply(new DataStreamsStatsRequest.Builder()).build());
	}

	/**
	 * Get data stream stats.
	 * <p>
	 * Get statistics for one or more data streams.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DataStreamsStatsResponse> dataStreamsStats() {
		return this.transport.performRequestAsync(new DataStreamsStatsRequest.Builder().build(),
				DataStreamsStatsRequest._ENDPOINT, this.transportOptions);
	}
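	// Editorial usage sketch (not part of the generated file): fetch statistics for all data streams
	// using the no-argument overload defined above.
	private static CompletableFuture<DataStreamsStatsResponse> dataStreamsStatsExample(
			ElasticsearchIndicesAsyncClient indices) {
		return indices.dataStreamsStats();
	}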

	// ----- Endpoint: indices.delete

	/**
	 * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete
	 * related Kibana components, such as data views, visualizations, or dashboards.
	 * <p>
	 * You cannot delete the current write index of a data stream. To delete the index, you must roll over
	 * the data stream so a new write index is created. You can then use the delete index API to delete the
	 * previous write index.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DeleteIndexResponse> delete(DeleteIndexRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DeleteIndexRequest, DeleteIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteIndexRequest, DeleteIndexResponse, ErrorResponse>) DeleteIndexRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete
	 * related Kibana components, such as data views, visualizations, or dashboards.
	 * <p>
	 * You cannot delete the current write index of a data stream. To delete the index, you must roll over
	 * the data stream so a new write index is created. You can then use the delete index API to delete the
	 * previous write index.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DeleteIndexRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DeleteIndexResponse> delete(
			Function<DeleteIndexRequest.Builder, ObjectBuilder<DeleteIndexRequest>> fn) {
		return delete(fn.apply(new DeleteIndexRequest.Builder()).build());
	}

	// ----- Endpoint: indices.delete_alias

	/**
	 * Delete an alias. Removes a data stream or index from an alias.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DeleteAliasResponse> deleteAlias(DeleteAliasRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DeleteAliasRequest, DeleteAliasResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteAliasRequest, DeleteAliasResponse, ErrorResponse>) DeleteAliasRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Delete an alias. Removes a data stream or index from an alias.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DeleteAliasRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DeleteAliasResponse> deleteAlias(
			Function<DeleteAliasRequest.Builder, ObjectBuilder<DeleteAliasRequest>> fn) {
		return deleteAlias(fn.apply(new DeleteAliasRequest.Builder()).build());
	}

	// ----- Endpoint: indices.delete_data_lifecycle

	/**
	 * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not
	 * managed by the data stream lifecycle.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DeleteDataLifecycleResponse> deleteDataLifecycle(DeleteDataLifecycleRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DeleteDataLifecycleRequest, DeleteDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteDataLifecycleRequest, DeleteDataLifecycleResponse, ErrorResponse>) DeleteDataLifecycleRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not
	 * managed by the data stream lifecycle.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DeleteDataLifecycleRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DeleteDataLifecycleResponse> deleteDataLifecycle(
			Function<DeleteDataLifecycleRequest.Builder, ObjectBuilder<DeleteDataLifecycleRequest>> fn) {
		return deleteDataLifecycle(fn.apply(new DeleteDataLifecycleRequest.Builder()).build());
	}

	// ----- Endpoint: indices.delete_data_stream

	/**
	 * Delete data streams. Deletes one or more data streams and their backing indices.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DeleteDataStreamResponse> deleteDataStream(DeleteDataStreamRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DeleteDataStreamRequest, DeleteDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteDataStreamRequest, DeleteDataStreamResponse, ErrorResponse>) DeleteDataStreamRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Delete data streams. Deletes one or more data streams and their backing indices.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DeleteDataStreamRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DeleteDataStreamResponse> deleteDataStream(
			Function<DeleteDataStreamRequest.Builder, ObjectBuilder<DeleteDataStreamRequest>> fn) {
		return deleteDataStream(fn.apply(new DeleteDataStreamRequest.Builder()).build());
	}
	// ----- Endpoint: indices.delete_index_template

	/**
	 * Delete an index template. The provided <code>&lt;index-template&gt;</code> may contain multiple
	 * template names separated by a comma. If multiple template names are specified then there is no
	 * wildcard support and the provided names should match completely with existing templates.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DeleteIndexTemplateResponse> deleteIndexTemplate(DeleteIndexTemplateRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, ErrorResponse>) DeleteIndexTemplateRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Delete an index template. The provided <code>&lt;index-template&gt;</code> may contain multiple
	 * template names separated by a comma. If multiple template names are specified then there is no
	 * wildcard support and the provided names should match completely with existing templates.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DeleteIndexTemplateRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DeleteIndexTemplateResponse> deleteIndexTemplate(
			Function<DeleteIndexTemplateRequest.Builder, ObjectBuilder<DeleteIndexTemplateRequest>> fn) {
		return deleteIndexTemplate(fn.apply(new DeleteIndexTemplateRequest.Builder()).build());
	}

	// ----- Endpoint: indices.delete_template

	/**
	 * Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which
	 * are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DeleteTemplateResponse> deleteTemplate(DeleteTemplateRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DeleteTemplateRequest, DeleteTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteTemplateRequest, DeleteTemplateResponse, ErrorResponse>) DeleteTemplateRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which
	 * are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DeleteTemplateRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DeleteTemplateResponse> deleteTemplate(
			Function<DeleteTemplateRequest.Builder, ObjectBuilder<DeleteTemplateRequest>> fn) {
		return deleteTemplate(fn.apply(new DeleteTemplateRequest.Builder()).build());
	}

	// ----- Endpoint: indices.disk_usage

	/**
	 * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This
	 * API might not support indices created in previous Elasticsearch versions. The result of a small index
	 * can be inaccurate as some parts of an index might not be analyzed by the API.
	 * <p>
	 * NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller
	 * than the index <code>store_size</code> value because some small metadata files are ignored and some
	 * parts of data files might not be scanned by the API. Since stored fields are stored together in a
	 * compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored
	 * size of the <code>_id</code> field is likely underestimated while the <code>_source</code> field is
	 * overestimated.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DiskUsageResponse> diskUsage(DiskUsageRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DiskUsageRequest, DiskUsageResponse, ErrorResponse> endpoint = (JsonEndpoint<DiskUsageRequest, DiskUsageResponse, ErrorResponse>) DiskUsageRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This
	 * API might not support indices created in previous Elasticsearch versions. The result of a small index
	 * can be inaccurate as some parts of an index might not be analyzed by the API.
	 * <p>
	 * NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller
	 * than the index <code>store_size</code> value because some small metadata files are ignored and some
	 * parts of data files might not be scanned by the API. Since stored fields are stored together in a
	 * compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored
	 * size of the <code>_id</code> field is likely underestimated while the <code>_source</code> field is
	 * overestimated.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DiskUsageRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DiskUsageResponse> diskUsage(
			Function<DiskUsageRequest.Builder, ObjectBuilder<DiskUsageRequest>> fn) {
		return diskUsage(fn.apply(new DiskUsageRequest.Builder()).build());
	}
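	// Editorial usage sketch (not part of the generated file): analyze the disk usage of an index. The
	// index name is illustrative, and the runExpensiveTasks builder method is assumed to mirror the API's
	// run_expensive_tasks flag, which must be true for the analysis to run.
	private static CompletableFuture<DiskUsageResponse> diskUsageExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.diskUsage(d -> d
				.index("my-index")
				.runExpensiveTasks(true)); // required, otherwise the API refuses to run
	}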

	// ----- Endpoint: indices.downsample

	/**
	 * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries
	 * (<code>min</code>, <code>max</code>, <code>sum</code>, <code>value_count</code> and <code>avg</code>)
	 * for each metric field grouped by a configured time interval. For example, a TSDS index that contains
	 * metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour
	 * interval are summarized and stored as a single document in the downsample index.
	 * <p>
	 * NOTE: Only indices in a time series data stream are supported. Neither field nor document level
	 * security can be defined on the source index. The source index must be read only
	 * (<code>index.blocks.write: true</code>).
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<DownsampleResponse> downsample(DownsampleRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<DownsampleRequest, DownsampleResponse, ErrorResponse> endpoint = (JsonEndpoint<DownsampleRequest, DownsampleResponse, ErrorResponse>) DownsampleRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries
	 * (<code>min</code>, <code>max</code>, <code>sum</code>, <code>value_count</code> and <code>avg</code>)
	 * for each metric field grouped by a configured time interval. For example, a TSDS index that contains
	 * metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour
	 * interval are summarized and stored as a single document in the downsample index.
	 * <p>
	 * NOTE: Only indices in a time series data stream are supported. Neither field nor document level
	 * security can be defined on the source index. The source index must be read only
	 * (<code>index.blocks.write: true</code>).
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link DownsampleRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<DownsampleResponse> downsample(
			Function<DownsampleRequest.Builder, ObjectBuilder<DownsampleRequest>> fn) {
		return downsample(fn.apply(new DownsampleRequest.Builder()).build());
	}

	// ----- Endpoint: indices.exists

	/**
	 * Check indices. Check if one or more indices, index aliases, or data streams exist.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<BooleanResponse> exists(ExistsRequest request) {
		@SuppressWarnings("unchecked")
		Endpoint<ExistsRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsRequest, BooleanResponse, ErrorResponse>) ExistsRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Check indices. Check if one or more indices, index aliases, or data streams exist.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ExistsRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<BooleanResponse> exists(
			Function<ExistsRequest.Builder, ObjectBuilder<ExistsRequest>> fn) {
		return exists(fn.apply(new ExistsRequest.Builder()).build());
	}
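	// Editorial usage sketch (not part of the generated file): check whether an index exists and unwrap
	// the BooleanResponse. The index name is illustrative.
	private static CompletableFuture<Boolean> existsExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.exists(e -> e.index("my-index"))
				.thenApply(BooleanResponse::value);
	}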

	// ----- Endpoint: indices.exists_alias

	/**
	 * Check aliases.
	 * <p>
	 * Check if one or more data stream or index aliases exist.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<BooleanResponse> existsAlias(ExistsAliasRequest request) {
		@SuppressWarnings("unchecked")
		Endpoint<ExistsAliasRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsAliasRequest, BooleanResponse, ErrorResponse>) ExistsAliasRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Check aliases.
	 * <p>
	 * Check if one or more data stream or index aliases exist.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ExistsAliasRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<BooleanResponse> existsAlias(
			Function<ExistsAliasRequest.Builder, ObjectBuilder<ExistsAliasRequest>> fn) {
		return existsAlias(fn.apply(new ExistsAliasRequest.Builder()).build());
	}

	// ----- Endpoint: indices.exists_index_template

	/**
	 * Check index templates.
	 * <p>
	 * Check whether index templates exist.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<BooleanResponse> existsIndexTemplate(ExistsIndexTemplateRequest request) {
		@SuppressWarnings("unchecked")
		Endpoint<ExistsIndexTemplateRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsIndexTemplateRequest, BooleanResponse, ErrorResponse>) ExistsIndexTemplateRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Check index templates.
	 * <p>
	 * Check whether index templates exist.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ExistsIndexTemplateRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<BooleanResponse> existsIndexTemplate(
			Function<ExistsIndexTemplateRequest.Builder, ObjectBuilder<ExistsIndexTemplateRequest>> fn) {
		return existsIndexTemplate(fn.apply(new ExistsIndexTemplateRequest.Builder()).build());
	}

	// ----- Endpoint: indices.exists_template

	/**
	 * Check existence of index templates. Get information about whether index templates exist. Index
	 * templates define settings, mappings, and aliases that can be applied automatically to new indices.
	 * <p>
	 * IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be
	 * replaced by the composable templates introduced in Elasticsearch 7.8.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<BooleanResponse> existsTemplate(ExistsTemplateRequest request) {
		@SuppressWarnings("unchecked")
		Endpoint<ExistsTemplateRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsTemplateRequest, BooleanResponse, ErrorResponse>) ExistsTemplateRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Check existence of index templates. Get information about whether index templates exist. Index
	 * templates define settings, mappings, and aliases that can be applied automatically to new indices.
	 * <p>
	 * IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be
	 * replaced by the composable templates introduced in Elasticsearch 7.8.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ExistsTemplateRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<BooleanResponse> existsTemplate(
			Function<ExistsTemplateRequest.Builder, ObjectBuilder<ExistsTemplateRequest>> fn) {
		return existsTemplate(fn.apply(new ExistsTemplateRequest.Builder()).build());
	}

	// ----- Endpoint: indices.explain_data_lifecycle

	/**
	 * Get the status for a data stream lifecycle. Get information about an index or data stream's current
	 * data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle
	 * configuration managing the index, or any errors encountered during lifecycle execution.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ExplainDataLifecycleResponse> explainDataLifecycle(ExplainDataLifecycleRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<ExplainDataLifecycleRequest, ExplainDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<ExplainDataLifecycleRequest, ExplainDataLifecycleResponse, ErrorResponse>) ExplainDataLifecycleRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Get the status for a data stream lifecycle. Get information about an index or data stream's current
	 * data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle
	 * configuration managing the index, or any errors encountered during lifecycle execution.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ExplainDataLifecycleRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<ExplainDataLifecycleResponse> explainDataLifecycle(
			Function<ExplainDataLifecycleRequest.Builder, ObjectBuilder<ExplainDataLifecycleRequest>> fn) {
		return explainDataLifecycle(fn.apply(new ExplainDataLifecycleRequest.Builder()).build());
	}

	// ----- Endpoint: indices.field_usage_stats

	/**
	 * Get field usage stats. Get field usage information for each shard and field of an index. Field usage
	 * statistics are automatically captured when queries are running on a cluster. A shard-level search
	 * request that accesses a given field, even if multiple times during that request, is counted as a
	 * single use.
	 * <p>
	 * The response body reports the per-shard usage count of the data structures that back the fields in
	 * the index. A given request will increment each count by a maximum value of 1, even if the request
	 * accesses the same field multiple times.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<FieldUsageStatsResponse> fieldUsageStats(FieldUsageStatsRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<FieldUsageStatsRequest, FieldUsageStatsResponse, ErrorResponse> endpoint = (JsonEndpoint<FieldUsageStatsRequest, FieldUsageStatsResponse, ErrorResponse>) FieldUsageStatsRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Get field usage stats. Get field usage information for each shard and field of an index. Field usage
	 * statistics are automatically captured when queries are running on a cluster. A shard-level search
	 * request that accesses a given field, even if multiple times during that request, is counted as a
	 * single use.
	 * <p>
	 * The response body reports the per-shard usage count of the data structures that back the fields in
	 * the index. A given request will increment each count by a maximum value of 1, even if the request
	 * accesses the same field multiple times.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link FieldUsageStatsRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<FieldUsageStatsResponse> fieldUsageStats(
			Function<FieldUsageStatsRequest.Builder, ObjectBuilder<FieldUsageStatsRequest>> fn) {
		return fieldUsageStats(fn.apply(new FieldUsageStatsRequest.Builder()).build());
	}

	// ----- Endpoint: indices.flush

	/**
	 * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any
	 * data that is currently only stored in the transaction log is also permanently stored in the Lucene
	 * index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into
	 * the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch
	 * automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed
	 * transaction log against the cost of performing each flush.
	 * <p>
	 * After each operation has been flushed it is permanently stored in the Lucene index. This may mean
	 * that there is no need to maintain an additional copy of it in the transaction log. The transaction
	 * log is made up of multiple files, called generations, and Elasticsearch will delete any generation
	 * files when they are no longer needed, freeing up disk space.
	 * <p>
	 * It is also possible to trigger a flush on one or more indices using the flush API, although it is
	 * rare for users to need to call this API directly. If you call the flush API after indexing some
	 * documents then a successful response indicates that Elasticsearch has flushed all the documents that
	 * were indexed before the flush API was called.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<FlushResponse> flush(FlushRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<FlushRequest, FlushResponse, ErrorResponse> endpoint = (JsonEndpoint<FlushRequest, FlushResponse, ErrorResponse>) FlushRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any
	 * data that is currently only stored in the transaction log is also permanently stored in the Lucene
	 * index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into
	 * the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch
	 * automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed
	 * transaction log against the cost of performing each flush.
	 * <p>
	 * After each operation has been flushed it is permanently stored in the Lucene index. This may mean
	 * that there is no need to maintain an additional copy of it in the transaction log. The transaction
	 * log is made up of multiple files, called generations, and Elasticsearch will delete any generation
	 * files when they are no longer needed, freeing up disk space.
	 * <p>
	 * It is also possible to trigger a flush on one or more indices using the flush API, although it is
	 * rare for users to need to call this API directly. If you call the flush API after indexing some
	 * documents then a successful response indicates that Elasticsearch has flushed all the documents that
	 * were indexed before the flush API was called.
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link FlushRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<FlushResponse> flush(
			Function<FlushRequest.Builder, ObjectBuilder<FlushRequest>> fn) {
		return flush(fn.apply(new FlushRequest.Builder()).build());
	}

	/**
	 * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any
	 * data that is currently only stored in the transaction log is also permanently stored in the Lucene
	 * index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into
	 * the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch
	 * automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed
	 * transaction log against the cost of performing each flush.
	 * <p>
	 * After each operation has been flushed it is permanently stored in the Lucene index. This may mean
	 * that there is no need to maintain an additional copy of it in the transaction log. The transaction
	 * log is made up of multiple files, called generations, and Elasticsearch will delete any generation
	 * files when they are no longer needed, freeing up disk space.
	 * <p>
	 * It is also possible to trigger a flush on one or more indices using the flush API, although it is
	 * rare for users to need to call this API directly. If you call the flush API after indexing some
	 * documents then a successful response indicates that Elasticsearch has flushed all the documents that
	 * were indexed before the flush API was called.
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<FlushResponse> flush() {
		return this.transport.performRequestAsync(new FlushRequest.Builder().build(), FlushRequest._ENDPOINT,
				this.transportOptions);
	}
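	// Editorial usage sketch (not part of the generated file): explicitly flush one index. Calling this is
	// rarely needed since Elasticsearch flushes automatically; the index name is illustrative.
	private static CompletableFuture<FlushResponse> flushExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.flush(f -> f.index("my-index"));
	}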

	// ----- Endpoint: indices.forcemerge

	/**
	 * Force a merge. Perform the force merge operation on the shards of one or more indices. For data
	 * streams, the API forces a merge on the shards of the stream's backing indices.
	 * <p>
	 * Merging reduces the number of segments in each shard by merging some of them together and also frees
	 * up the space used by deleted documents. Merging normally happens automatically, but sometimes it is
	 * useful to trigger a merge manually.
	 * <p>
	 * WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving
	 * writes). When documents are updated or deleted, the old version is not immediately removed but
	 * instead soft-deleted and marked with a &quot;tombstone&quot;. These soft-deleted documents are
	 * automatically cleaned up during regular segment merges. But force merge can cause very large (greater
	 * than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of
	 * soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search
	 * performance. If you regularly force merge an index receiving writes, this can also make snapshots
	 * more expensive, since the new documents can't be backed up incrementally.
	 * <p>
	 * Blocks during a force merge
	 * <p>
	 * Calls to this API block until the merge is complete (unless the request contains
	 * <code>wait_for_completion=false</code>). If the client connection is lost before completion then the
	 * force merge process will continue in the background. Any new requests to force merge the same indices
	 * will also block until the ongoing force merge is complete.
	 * <p>
	 * Running force merge asynchronously
	 * <p>
	 * If the request contains <code>wait_for_completion=false</code>, Elasticsearch performs some preflight
	 * checks, launches the request, and returns a task you can use to get the status of the task. However,
	 * you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record
	 * of this task as a document at <code>_tasks/&lt;task_id&gt;</code>. When you are done with a task, you
	 * should delete the task document so Elasticsearch can reclaim the space.
	 * <p>
	 * Force merging multiple indices
	 * <p>
	 * You can force merge multiple indices with a single request by targeting:
	 * <ul>
	 * <li>One or more data streams that contain multiple backing indices</li>
	 * <li>Multiple indices</li>
	 * <li>One or more aliases</li>
	 * <li>All data streams and indices in a cluster</li>
	 * </ul>
	 * <p>
	 * Each targeted shard is force-merged separately using the <code>force_merge</code> threadpool. By
	 * default each node only has a single <code>force_merge</code> thread which means that the shards on
	 * that node are force-merged one at a time. If you expand the <code>force_merge</code> threadpool on a
	 * node then it will force merge its shards in parallel.
	 * <p>
	 * Force merge makes the storage for the shard being merged temporarily increase, as it may require free
	 * space up to triple its size in case the <code>max_num_segments</code> parameter is set to
	 * <code>1</code>, to rewrite all segments into a new one.
	 * <p>
	 * Data streams and time-based indices
	 * <p>
	 * Force-merging is useful for managing a data stream's older backing indices and other time-based
	 * indices, particularly after a rollover. In these cases, each index only receives indexing traffic for
	 * a certain period of time. Once an index receives no more writes, its shards can be force-merged to a
	 * single segment. This can be a good idea because single-segment shards can sometimes use simpler and
	 * more efficient data structures to perform searches. For example:
	 *
	 * <pre>
	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
	 * </pre>
	 *
	 * @see Documentation on elastic.co
	 */

	public CompletableFuture<ForcemergeResponse> forcemerge(ForcemergeRequest request) {
		@SuppressWarnings("unchecked")
		JsonEndpoint<ForcemergeRequest, ForcemergeResponse, ErrorResponse> endpoint = (JsonEndpoint<ForcemergeRequest, ForcemergeResponse, ErrorResponse>) ForcemergeRequest._ENDPOINT;

		return this.transport.performRequestAsync(request, endpoint, this.transportOptions);
	}

	/**
	 * Force a merge. Perform the force merge operation on the shards of one or more indices. For data
	 * streams, the API forces a merge on the shards of the stream's backing indices.
	 * <p>
	 * Merging reduces the number of segments in each shard by merging some of them together and also frees
	 * up the space used by deleted documents. Merging normally happens automatically, but sometimes it is
	 * useful to trigger a merge manually.
	 * <p>
	 * WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving
	 * writes). When documents are updated or deleted, the old version is not immediately removed but
	 * instead soft-deleted and marked with a &quot;tombstone&quot;. These soft-deleted documents are
	 * automatically cleaned up during regular segment merges. But force merge can cause very large (greater
	 * than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of
	 * soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search
	 * performance. If you regularly force merge an index receiving writes, this can also make snapshots
	 * more expensive, since the new documents can't be backed up incrementally.
	 * <p>
	 * Blocks during a force merge
	 * <p>
	 * Calls to this API block until the merge is complete (unless the request contains
	 * <code>wait_for_completion=false</code>). If the client connection is lost before completion then the
	 * force merge process will continue in the background. Any new requests to force merge the same indices
	 * will also block until the ongoing force merge is complete.
	 * <p>
	 * Running force merge asynchronously
	 * <p>
	 * If the request contains <code>wait_for_completion=false</code>, Elasticsearch performs some preflight
	 * checks, launches the request, and returns a task you can use to get the status of the task. However,
	 * you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record
	 * of this task as a document at <code>_tasks/&lt;task_id&gt;</code>. When you are done with a task, you
	 * should delete the task document so Elasticsearch can reclaim the space.
	 * <p>
	 * Force merging multiple indices
	 * <p>
	 * You can force merge multiple indices with a single request by targeting:
	 * <ul>
	 * <li>One or more data streams that contain multiple backing indices</li>
	 * <li>Multiple indices</li>
	 * <li>One or more aliases</li>
	 * <li>All data streams and indices in a cluster</li>
	 * </ul>
	 * <p>
	 * Each targeted shard is force-merged separately using the <code>force_merge</code> threadpool. By
	 * default each node only has a single <code>force_merge</code> thread which means that the shards on
	 * that node are force-merged one at a time. If you expand the <code>force_merge</code> threadpool on a
	 * node then it will force merge its shards in parallel.
	 * <p>
	 * Force merge makes the storage for the shard being merged temporarily increase, as it may require free
	 * space up to triple its size in case the <code>max_num_segments</code> parameter is set to
	 * <code>1</code>, to rewrite all segments into a new one.
	 * <p>
	 * Data streams and time-based indices
	 * <p>
	 * Force-merging is useful for managing a data stream's older backing indices and other time-based
	 * indices, particularly after a rollover. In these cases, each index only receives indexing traffic for
	 * a certain period of time. Once an index receives no more writes, its shards can be force-merged to a
	 * single segment. This can be a good idea because single-segment shards can sometimes use simpler and
	 * more efficient data structures to perform searches. For example:
	 *
	 * <pre>
	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
	 * </pre>
	 *
	 * @param fn
	 *            a function that initializes a builder to create the {@link ForcemergeRequest}
	 * @see Documentation on elastic.co
	 */

	public final CompletableFuture<ForcemergeResponse> forcemerge(
			Function<ForcemergeRequest.Builder, ObjectBuilder<ForcemergeRequest>> fn) {
		return forcemerge(fn.apply(new ForcemergeRequest.Builder()).build());
	}
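	// Editorial usage sketch (not part of the generated file): force merge a rolled-over backing index
	// down to a single segment, mirroring the POST example in the Javadoc above. The index name is
	// illustrative.
	private static CompletableFuture<ForcemergeResponse> forcemergeExample(ElasticsearchIndicesAsyncClient indices) {
		return indices.forcemerge(f -> f
				.index(".ds-my-data-stream-2099.03.07-000001")
				.maxNumSegments(1L)); // rewrite all segments into one
	}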

* Merging reduces the number of segments in each shard by merging some of them * together and also frees up the space used by deleted documents. Merging * normally happens automatically, but sometimes it is useful to trigger a merge * manually. *

* WARNING: We recommend force merging only a read-only index (meaning the index * is no longer receiving writes). When documents are updated or deleted, the * old version is not immediately removed but instead soft-deleted and marked * with a "tombstone". These soft-deleted documents are automatically * cleaned up during regular segment merges. But force merge can cause very * large (greater than 5 GB) segments to be produced, which are not eligible for * regular merges. So the number of soft-deleted documents can then grow * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. *

* Blocks during a force merge *

* Calls to this API block until the merge is complete (unless request contains * wait_for_completion=false). If the client connection is lost * before completion then the force merge process will continue in the * background. Any new requests to force merge the same indices will also block * until the ongoing force merge is complete. *

* Running force merge asynchronously *

* If the request contains wait_for_completion=false, Elasticsearch * performs some preflight checks, launches the request, and returns a task you * can use to get the status of the task. However, you cannot cancel this task, * as the force merge task is not cancelable. Elasticsearch creates a record of * this task as a document at _tasks/<task_id>. When you are * done with a task, you should delete the task document so Elasticsearch can * reclaim the space. *

* Force merging multiple indices *

* You can force merge multiple indices with a single request by targeting: *

  • One or more data streams that contain multiple backing indices
  • Multiple indices
  • One or more aliases
  • All data streams and indices in a cluster
*

* Each targeted shard is force-merged separately using the force_merge * thread pool. By default, each node only has a single force_merge * thread, which means that the shards on that node are force-merged one at a * time. If you expand the force_merge thread pool on a node, then it * will force merge its shards in parallel. *

* Force merge makes the storage for the shard being merged temporarily * increase, as it may require free space of up to triple its size when the * max_num_segments parameter is set to 1, in order to rewrite * all segments into a new one. *

* Data streams and time-based indices *

* Force-merging is useful for managing a data stream's older backing indices * and other time-based indices, particularly after a rollover. In these cases, * each index only receives indexing traffic for a certain period of time. Once * an index receives no more writes, its shards can be force-merged to a single * segment. This can be a good idea because single-segment shards can sometimes * use simpler and more efficient data structures to perform searches. For * example: * *

	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
	 * 
	 * 
* * @see Documentation * on elastic.co */ public CompletableFuture forcemerge() { return this.transport.performRequestAsync(new ForcemergeRequest.Builder().build(), ForcemergeRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get /** * Get index information. Get information about one or more indices. For data * streams, the API returns information about the stream’s backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture get(GetIndexRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetIndexRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index information. Get information about one or more indices. For data * streams, the API returns information about the stream’s backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetIndexRequest} * @see Documentation * on elastic.co */ public final CompletableFuture get( Function> fn) { return get(fn.apply(new GetIndexRequest.Builder()).build()); } // ----- Endpoint: indices.get_alias /** * Get aliases. Retrieves information for one or more data stream or index * aliases. * * @see Documentation * on elastic.co */ public CompletableFuture getAlias(GetAliasRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetAliasRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get aliases. Retrieves information for one or more data stream or index * aliases. * * @param fn * a function that initializes a builder to create the * {@link GetAliasRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getAlias( Function> fn) { return getAlias(fn.apply(new GetAliasRequest.Builder()).build()); } /** * Get aliases. Retrieves information for one or more data stream or index * aliases. * * @see Documentation * on elastic.co */ public CompletableFuture getAlias() { return this.transport.performRequestAsync(new GetAliasRequest.Builder().build(), GetAliasRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get_data_lifecycle /** * Get data stream lifecycles. *

* Get the data stream lifecycle configuration of one or more data streams. * * @see Documentation * on elastic.co */ public CompletableFuture getDataLifecycle(GetDataLifecycleRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetDataLifecycleRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get data stream lifecycles. *

* Get the data stream lifecycle configuration of one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link GetDataLifecycleRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getDataLifecycle( Function> fn) { return getDataLifecycle(fn.apply(new GetDataLifecycleRequest.Builder()).build()); } // ----- Endpoint: indices.get_data_lifecycle_stats /** * Get data stream lifecycle stats. Get statistics about the data streams that * are managed by a data stream lifecycle. * * @see Documentation * on elastic.co */ public CompletableFuture getDataLifecycleStats() { return this.transport.performRequestAsync(GetDataLifecycleStatsRequest._INSTANCE, GetDataLifecycleStatsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get_data_stream /** * Get data streams. *

* Get information about one or more data streams. * * @see Documentation * on elastic.co */ public CompletableFuture getDataStream(GetDataStreamRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetDataStreamRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get data streams. *

* Get information about one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link GetDataStreamRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getDataStream( Function> fn) { return getDataStream(fn.apply(new GetDataStreamRequest.Builder()).build()); } /** * Get data streams. *

* Get information about one or more data streams. * * @see Documentation * on elastic.co */ public CompletableFuture getDataStream() { return this.transport.performRequestAsync(new GetDataStreamRequest.Builder().build(), GetDataStreamRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get_field_mapping /** * Get mapping definitions. Retrieves mapping definitions for one or more * fields. For data streams, the API retrieves field mappings for the stream’s * backing indices. *

* This API is useful if you don't need a complete mapping or if an index * mapping contains a large number of fields. * * @see Documentation * on elastic.co */ public CompletableFuture getFieldMapping(GetFieldMappingRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetFieldMappingRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get mapping definitions. Retrieves mapping definitions for one or more * fields. For data streams, the API retrieves field mappings for the stream’s * backing indices. *
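* For instance, fetching the mappings of just a couple of fields might look like this (a sketch; index and field names are hypothetical):
*
	 * client.indices().getFieldMapping(f -> f
	 *         .index("my-index")
	 *         .fields("user.id", "message")) // only these field mappings are returned
	 *     .thenAccept(resp -> System.out.println(resp.result().keySet()));
*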

* This API is useful if you don't need a complete mapping or if an index * mapping contains a large number of fields. * * @param fn * a function that initializes a builder to create the * {@link GetFieldMappingRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getFieldMapping( Function> fn) { return getFieldMapping(fn.apply(new GetFieldMappingRequest.Builder()).build()); } // ----- Endpoint: indices.get_index_template /** * Get index templates. Get information about one or more index templates. * * @see Documentation * on elastic.co */ public CompletableFuture getIndexTemplate(GetIndexTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetIndexTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index templates. Get information about one or more index templates. * * @param fn * a function that initializes a builder to create the * {@link GetIndexTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getIndexTemplate( Function> fn) { return getIndexTemplate(fn.apply(new GetIndexTemplateRequest.Builder()).build()); } /** * Get index templates. Get information about one or more index templates. * * @see Documentation * on elastic.co */ public CompletableFuture getIndexTemplate() { return this.transport.performRequestAsync(new GetIndexTemplateRequest.Builder().build(), GetIndexTemplateRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get_mapping /** * Get mapping definitions. For data streams, the API retrieves mappings for the * stream’s backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture getMapping(GetMappingRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetMappingRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get mapping definitions. For data streams, the API retrieves mappings for the * stream’s backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetMappingRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getMapping( Function> fn) { return getMapping(fn.apply(new GetMappingRequest.Builder()).build()); } /** * Get mapping definitions. For data streams, the API retrieves mappings for the * stream’s backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture getMapping() { return this.transport.performRequestAsync(new GetMappingRequest.Builder().build(), GetMappingRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get_migrate_reindex_status /** * Get the migration reindexing status. *

* Get the status of a migration reindex attempt for a data stream or index. * * @see Documentation * on elastic.co */ public CompletableFuture getMigrateReindexStatus( GetMigrateReindexStatusRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetMigrateReindexStatusRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get the migration reindexing status. *

* Get the status of a migration reindex attempt for a data stream or index. * * @param fn * a function that initializes a builder to create the * {@link GetMigrateReindexStatusRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getMigrateReindexStatus( Function> fn) { return getMigrateReindexStatus(fn.apply(new GetMigrateReindexStatusRequest.Builder()).build()); } // ----- Endpoint: indices.get_settings /** * Get index settings. Get setting information for one or more indices. For data * streams, it returns setting information for the stream's backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture getSettings(GetIndicesSettingsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetIndicesSettingsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index settings. Get setting information for one or more indices. For data * streams, it returns setting information for the stream's backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetIndicesSettingsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getSettings( Function> fn) { return getSettings(fn.apply(new GetIndicesSettingsRequest.Builder()).build()); } /** * Get index settings. Get setting information for one or more indices. For data * streams, it returns setting information for the stream's backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture getSettings() { return this.transport.performRequestAsync(new GetIndicesSettingsRequest.Builder().build(), GetIndicesSettingsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.get_template /** * Get legacy index templates. Get information about one or more index * templates. *

* IMPORTANT: This documentation is about legacy index templates, which are * deprecated and will be replaced by the composable templates introduced in * Elasticsearch 7.8. * * @see Documentation * on elastic.co */ public CompletableFuture getTemplate(GetTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) GetTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get legacy index templates. Get information about one or more index * templates. *

* IMPORTANT: This documentation is about legacy index templates, which are * deprecated and will be replaced by the composable templates introduced in * Elasticsearch 7.8. * * @param fn * a function that initializes a builder to create the * {@link GetTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture getTemplate( Function> fn) { return getTemplate(fn.apply(new GetTemplateRequest.Builder()).build()); } /** * Get legacy index templates. Get information about one or more index * templates. *

* IMPORTANT: This documentation is about legacy index templates, which are * deprecated and will be replaced by the composable templates introduced in * Elasticsearch 7.8. * * @see Documentation * on elastic.co */ public CompletableFuture getTemplate() { return this.transport.performRequestAsync(new GetTemplateRequest.Builder().build(), GetTemplateRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.migrate_reindex /** * Reindex legacy backing indices. *

* Reindex all legacy backing indices for a data stream. This operation occurs * in a persistent task. The persistent task ID is returned immediately and the * reindexing work is completed in that task. * * @see Documentation * on elastic.co */ public CompletableFuture migrateReindex(MigrateReindexRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) MigrateReindexRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Reindex legacy backing indices. *

* Reindex all legacy backing indices for a data stream. This operation occurs * in a persistent task. The persistent task ID is returned immediately and the * reindexing work is completed in that task. * * @param fn * a function that initializes a builder to create the * {@link MigrateReindexRequest} * @see Documentation * on elastic.co */ public final CompletableFuture migrateReindex( Function> fn) { return migrateReindex(fn.apply(new MigrateReindexRequest.Builder()).build()); } /** * Reindex legacy backing indices. *

* Reindex all legacy backing indices for a data stream. This operation occurs * in a persistent task. The persistent task ID is returned immediately and the * reindexing work is completed in that task. * * @see Documentation * on elastic.co */ public CompletableFuture migrateReindex() { return this.transport.performRequestAsync(new MigrateReindexRequest.Builder().build(), MigrateReindexRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.migrate_to_data_stream /** * Convert an index alias to a data stream. Converts an index alias to a data * stream. You must have a matching index template that is data stream enabled. * The alias must meet the following criteria: The alias must have a write * index; All indices for the alias must have a @timestamp field * mapping of a date or date_nanos field type; The * alias must not have any filters; The alias must not use custom routing. If * successful, the request removes the alias and creates a data stream with the * same name. The indices for the alias become hidden backing indices for the * stream. The write index for the alias becomes the write index for the stream. * * @see Documentation * on elastic.co */ public CompletableFuture migrateToDataStream(MigrateToDataStreamRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) MigrateToDataStreamRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Convert an index alias to a data stream. Converts an index alias to a data * stream. You must have a matching index template that is data stream enabled. * The alias must meet the following criteria: The alias must have a write * index; All indices for the alias must have a @timestamp field * mapping of a date or date_nanos field type; The * alias must not have any filters; The alias must not use custom routing. If * successful, the request removes the alias and creates a data stream with the * same name. The indices for the alias become hidden backing indices for the * stream. The write index for the alias becomes the write index for the stream. * * @param fn * a function that initializes a builder to create the * {@link MigrateToDataStreamRequest} * @see Documentation * on elastic.co */ public final CompletableFuture migrateToDataStream( Function> fn) { return migrateToDataStream(fn.apply(new MigrateToDataStreamRequest.Builder()).build()); } // ----- Endpoint: indices.modify_data_stream /** * Update data streams. Performs one or more data stream modification actions in * a single atomic operation. * * @see Documentation * on elastic.co */ public CompletableFuture modifyDataStream(ModifyDataStreamRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ModifyDataStreamRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update data streams. Performs one or more data stream modification actions in * a single atomic operation. * * @param fn * a function that initializes a builder to create the * {@link ModifyDataStreamRequest} * @see Documentation * on elastic.co */ public final CompletableFuture modifyDataStream( Function> fn) { return modifyDataStream(fn.apply(new ModifyDataStreamRequest.Builder()).build()); } // ----- Endpoint: indices.open /** * Open a closed index. For data streams, the API opens any closed backing * indices. *

* A closed index is blocked for read/write operations and does not allow all * operations that opened indices allow. It is not possible to index documents * or to search for documents in a closed index. As a result, closed indices do * not have to maintain internal data structures for indexing or searching * documents, which reduces their overhead on the cluster. *

* When opening or closing an index, the master is responsible for restarting * the index shards to reflect the new state of the index. The shards will then * go through the normal recovery process. The data of opened or closed indices * is automatically replicated by the cluster to ensure that enough shard copies * are safely kept around at all times. *

* You can open and close multiple indices. An error is thrown if the request * explicitly refers to a missing index. This behavior can be turned off by * using the ignore_unavailable=true parameter. *

* By default, you must explicitly name the indices you are opening or closing. * To open or close indices with _all, *, or other * wildcard expressions, change the * action.destructive_requires_name setting to false. * This setting can also be changed with the cluster update settings API. *

* Closed indices consume a significant amount of disk space, which can cause * problems in managed environments. Closing indices can be turned off with the * cluster settings API by setting cluster.indices.close.enable to * false. *

* Because opening or closing an index allocates its shards, the * wait_for_active_shards setting on index creation applies to the * _open and _close index actions as well. * * @see Documentation * on elastic.co */ public CompletableFuture open(OpenRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) OpenRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Open a closed index. For data streams, the API opens any closed backing * indices. *
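* A minimal sketch of reopening a closed index with this client (the index name is hypothetical):
*
	 * client.indices().open(o -> o
	 *         .index("my-closed-index"))  // the closed index to reopen
	 *     .thenAccept(resp -> System.out.println(
	 *         "acknowledged: " + resp.acknowledged()));
*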

* A closed index is blocked for read/write operations and does not allow all * operations that opened indices allow. It is not possible to index documents * or to search for documents in a closed index. As a result, closed indices do * not have to maintain internal data structures for indexing or searching * documents, which reduces their overhead on the cluster. *

* When opening or closing an index, the master is responsible for restarting * the index shards to reflect the new state of the index. The shards will then * go through the normal recovery process. The data of opened or closed indices * is automatically replicated by the cluster to ensure that enough shard copies * are safely kept around at all times. *

* You can open and close multiple indices. An error is thrown if the request * explicitly refers to a missing index. This behavior can be turned off by * using the ignore_unavailable=true parameter. *

* By default, you must explicitly name the indices you are opening or closing. * To open or close indices with _all, *, or other * wildcard expressions, change the * action.destructive_requires_name setting to false. * This setting can also be changed with the cluster update settings API. *

* Closed indices consume a significant amount of disk space, which can cause * problems in managed environments. Closing indices can be turned off with the * cluster settings API by setting cluster.indices.close.enable to * false. *

* Because opening or closing an index allocates its shards, the * wait_for_active_shards setting on index creation applies to the * _open and _close index actions as well. * * @param fn * a function that initializes a builder to create the * {@link OpenRequest} * @see Documentation * on elastic.co */ public final CompletableFuture open(Function> fn) { return open(fn.apply(new OpenRequest.Builder()).build()); } // ----- Endpoint: indices.promote_data_stream /** * Promote a data stream. Promote a data stream from a replicated data stream * managed by cross-cluster replication (CCR) to a regular data stream. *

* With CCR auto following, a data stream from a remote cluster can be * replicated to the local cluster. These data streams can't be rolled over in * the local cluster. These replicated data streams roll over only if the * upstream data stream rolls over. In the event that the remote cluster is no * longer available, the data stream in the local cluster can be promoted to a * regular data stream, which allows these data streams to be rolled over in the * local cluster. *
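* In that situation, the promotion could be issued along these lines (a sketch; the data stream name is hypothetical):
*
	 * client.indices().promoteDataStream(p -> p
	 *         .name("my-replicated-data-stream"));
*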

* NOTE: When promoting a data stream, ensure the local cluster has a data * stream enabled index template that matches the data stream. If this is * missing, the data stream will not be able to roll over until a matching index * template is created. This will affect the lifecycle management of the data * stream and interfere with the data stream size and retention. * * @see Documentation * on elastic.co */ public CompletableFuture promoteDataStream(PromoteDataStreamRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PromoteDataStreamRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Promote a data stream. Promote a data stream from a replicated data stream * managed by cross-cluster replication (CCR) to a regular data stream. *

* With CCR auto following, a data stream from a remote cluster can be * replicated to the local cluster. These data streams can't be rolled over in * the local cluster. These replicated data streams roll over only if the * upstream data stream rolls over. In the event that the remote cluster is no * longer available, the data stream in the local cluster can be promoted to a * regular data stream, which allows these data streams to be rolled over in the * local cluster. *

* NOTE: When promoting a data stream, ensure the local cluster has a data * stream enabled index template that matches the data stream. If this is * missing, the data stream will not be able to roll over until a matching index * template is created. This will affect the lifecycle management of the data * stream and interfere with the data stream size and retention. * * @param fn * a function that initializes a builder to create the * {@link PromoteDataStreamRequest} * @see Documentation * on elastic.co */ public final CompletableFuture promoteDataStream( Function> fn) { return promoteDataStream(fn.apply(new PromoteDataStreamRequest.Builder()).build()); } // ----- Endpoint: indices.put_alias /** * Create or update an alias. Adds a data stream or index to an alias. * * @see Documentation * on elastic.co */ public CompletableFuture putAlias(PutAliasRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutAliasRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Create or update an alias. Adds a data stream or index to an alias. * * @param fn * a function that initializes a builder to create the * {@link PutAliasRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putAlias( Function> fn) { return putAlias(fn.apply(new PutAliasRequest.Builder()).build()); } // ----- Endpoint: indices.put_data_lifecycle /** * Update data stream lifecycles. Update the data stream lifecycle of the * specified data streams. * * @see Documentation * on elastic.co */ public CompletableFuture putDataLifecycle(PutDataLifecycleRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutDataLifecycleRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update data stream lifecycles. Update the data stream lifecycle of the * specified data streams. * * @param fn * a function that initializes a builder to create the * {@link PutDataLifecycleRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putDataLifecycle( Function> fn) { return putDataLifecycle(fn.apply(new PutDataLifecycleRequest.Builder()).build()); } // ----- Endpoint: indices.put_index_template /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. *

* Elasticsearch applies templates to new indices based on a wildcard pattern * that matches the index name. Index templates are applied during data stream * or index creation. For data streams, these settings and mappings are applied * when the stream's backing indices are created. Settings and mappings * specified in a create index API request override any settings or mappings * specified in an index template. Changes to index templates do not affect * existing indices, including the existing backing indices of a data stream. *

* You can use C-style /* *\/ block comments in index templates. * You can include comments anywhere in the request body, except before the * opening curly bracket. *

* Multiple matching templates *

* If multiple index templates match the name of a new index or data stream, the * template with the highest priority is used. *

* Multiple templates with overlapping index patterns at the same priority are * not allowed and an error will be thrown when attempting to create a template * matching an existing index template at identical priorities. *

* Composing aliases, mappings, and settings *

* When multiple component templates are specified in the * composed_of field for an index template, they are merged in the * order specified, meaning that later component templates override earlier * component templates. Any mappings, settings, or aliases from the parent index * template are merged in next. Finally, any configuration on the index request * itself is merged. Mapping definitions are merged recursively, which means * that later mapping components can introduce new field mappings and update the * mapping configuration. If a field mapping is already contained in an earlier * component, its definition will be completely overwritten by the later one. * This recursive merging strategy applies not only to field mappings, but also * root options like dynamic_templates and meta. If an * earlier component contains a dynamic_templates block, then by * default new dynamic_templates entries are appended onto the end. * If an entry already exists with the same key, then it is overwritten by the * new definition. * * @see Documentation * on elastic.co */ public CompletableFuture putIndexTemplate(PutIndexTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutIndexTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. *
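* As an illustrative sketch, a template composed of component templates might be created like this (template and component names are hypothetical, and builder details may differ slightly between client versions):
*
	 * client.indices().putIndexTemplate(t -> t
	 *         .name("logs-template")
	 *         .indexPatterns("logs-*")
	 *         .priority(100L)                                 // highest priority wins
	 *         .composedOf("logs-mappings", "logs-settings")); // merged in order
*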

* Elasticsearch applies templates to new indices based on a wildcard pattern * that matches the index name. Index templates are applied during data stream * or index creation. For data streams, these settings and mappings are applied * when the stream's backing indices are created. Settings and mappings * specified in a create index API request override any settings or mappings * specified in an index template. Changes to index templates do not affect * existing indices, including the existing backing indices of a data stream. *

* You can use C-style /* *\/ block comments in index templates. * You can include comments anywhere in the request body, except before the * opening curly bracket. *

* Multiple matching templates *

* If multiple index templates match the name of a new index or data stream, the * template with the highest priority is used. *

* Multiple templates with overlapping index patterns at the same priority are * not allowed and an error will be thrown when attempting to create a template * matching an existing index template at identical priorities. *

* Composing aliases, mappings, and settings *

* When multiple component templates are specified in the * composed_of field for an index template, they are merged in the * order specified, meaning that later component templates override earlier * component templates. Any mappings, settings, or aliases from the parent index * template are merged in next. Finally, any configuration on the index request * itself is merged. Mapping definitions are merged recursively, which means * that later mapping components can introduce new field mappings and update the * mapping configuration. If a field mapping is already contained in an earlier * component, its definition will be completely overwritten by the later one. * This recursive merging strategy applies not only to field mappings, but also * root options like dynamic_templates and meta. If an * earlier component contains a dynamic_templates block, then by * default new dynamic_templates entries are appended onto the end. * If an entry already exists with the same key, then it is overwritten by the * new definition. * * @param fn * a function that initializes a builder to create the * {@link PutIndexTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putIndexTemplate( Function> fn) { return putIndexTemplate(fn.apply(new PutIndexTemplateRequest.Builder()).build()); } // ----- Endpoint: indices.put_mapping /** * Update field mappings. Add new fields to an existing data stream or index. * You can use the update mapping API to: *

  • Add a new field to an existing index
  • Update mappings for multiple indices in a single request
  • Add new properties to an object field
  • Enable multi-fields for an existing field
  • Update supported mapping parameters
  • Change a field's mapping using reindexing
  • Rename a field using a field alias
*
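* As a brief sketch, adding a new keyword field to an existing index could look like this (index and field names are hypothetical):
*
	 * client.indices().putMapping(m -> m
	 *         .index("my-index")
	 *         .properties("email", p -> p  // add a new field named "email"
	 *             .keyword(k -> k)));
*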

* Learn how to use the update mapping API with practical examples in the * Update * mapping API examples guide. * * @see Documentation * on elastic.co */ public CompletableFuture putMapping(PutMappingRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutMappingRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update field mappings. Add new fields to an existing data stream or index. * You can use the update mapping API to: *

  • Add a new field to an existing index
  • Update mappings for multiple indices in a single request
  • Add new properties to an object field
  • Enable multi-fields for an existing field
  • Update supported mapping parameters
  • Change a field's mapping using reindexing
  • Rename a field using a field alias
*

* Learn how to use the update mapping API with practical examples in the * Update * mapping API examples guide. * * @param fn * a function that initializes a builder to create the * {@link PutMappingRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putMapping( Function> fn) { return putMapping(fn.apply(new PutMappingRequest.Builder()).build()); } // ----- Endpoint: indices.put_settings /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. *

* To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be * found in index settings documentation. To prevent existing settings from * being updated, set the preserve_existing parameter to * true. *

* For performance optimization during bulk indexing, you can disable the * refresh interval. Refer to disable * refresh interval for an example. There are multiple valid ways to * represent index settings in the request body. You can specify only the * setting, for example: * *

	 * {
	 *   "number_of_replicas": 1
	 * }
	 * 
	 * 
*

* Or you can use an index setting object: * *

	 * {
	 *   "index": {
	 *     "number_of_replicas": 1
	 *   }
	 * }
	 * 
	 * 
*

* Or you can use dot notation: * *

	 * {
	 *   "index.number_of_replicas": 1
	 * }
	 * 
	 * 
*

* Or you can embed any of the aforementioned options in a settings * object. For example: * *

	 * {
	 *   "settings": {
	 *     "index": {
	 *       "number_of_replicas": 1
	 *     }
	 *   }
	 * }
	 * 
	 * 
*
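* Through this client, the same update could be sketched roughly as follows (the index name is hypothetical, and the exact type accepted by the numberOfReplicas setter may vary across client versions):
*
	 * client.indices().putSettings(s -> s
	 *         .index("my-index")
	 *         .settings(is -> is
	 *             .numberOfReplicas("1"))); // "index.number_of_replicas": 1
*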

* NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the * analyzer for a data stream's write index and future backing indices, update * the analyzer in the index template used by the stream. Then roll over the * data stream to apply the new analyzer to the stream's write index and future * backing indices. This affects searches and any new data added to the stream * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. * Refer to updating * analyzers on existing indices for step-by-step examples. * * @see Documentation * on elastic.co */ public CompletableFuture putSettings(PutIndicesSettingsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutIndicesSettingsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. *

* To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be * found in index settings documentation. To prevent existing settings from * being updated, set the preserve_existing parameter to * true. *

* For performance optimization during bulk indexing, you can disable the * refresh interval. Refer to disable * refresh interval for an example. There are multiple valid ways to * represent index settings in the request body. You can specify only the * setting, for example: * *

	 * {
	 *   "number_of_replicas": 1
	 * }
	 * 
	 * 
*

* Or you can use an index setting object: * *

	 * {
	 *   "index": {
	 *     "number_of_replicas": 1
	 *   }
	 * }
	 * 
	 * 
*

* Or you can use dot notation: * *

	 * {
	 *   "index.number_of_replicas": 1
	 * }
	 * 
	 * 
*

* Or you can embed any of the aforementioned options in a settings * object. For example: * *

	 * {
	 *   "settings": {
	 *     "index": {
	 *       "number_of_replicas": 1
	 *     }
	 *   }
	 * }
	 * 
	 * 
*

* NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the * analyzer for a data stream's write index and future backing indices, update * the analyzer in the index template used by the stream. Then roll over the * data stream to apply the new analyzer to the stream's write index and future * backing indices. This affects searches and any new data added to the stream * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. * Refer to updating * analyzers on existing indices for step-by-step examples. * * @param fn * a function that initializes a builder to create the * {@link PutIndicesSettingsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putSettings( Function> fn) { return putSettings(fn.apply(new PutIndicesSettingsRequest.Builder()).build()); } /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. *

* To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be * found in index settings documentation. To prevent existing settings from * being updated, set the preserve_existing parameter to * true. *

* For performance optimization during bulk indexing, you can disable the * refresh interval. Refer to disable * refresh interval for an example. There are multiple valid ways to * represent index settings in the request body. You can specify only the * setting, for example: * *

	 * {
	 *   "number_of_replicas": 1
	 * }
	 * 
	 * 
*

* Or you can use an index setting object: * *

	 * {
	 *   "index": {
	 *     "number_of_replicas": 1
	 *   }
	 * }
	 * 
	 * 
*

* Or you can use dot notation: * *

	 * {
	 *   "index.number_of_replicas": 1
	 * }
	 * 
	 * 
*

* Or you can embed any of the aforementioned options in a settings * object. For example: * *

	 * {
	 *   "settings": {
	 *     "index": {
	 *       "number_of_replicas": 1
	 *     }
	 *   }
	 * }
	 * 
	 * 
*

* NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the * analyzer for a data stream's write index and future backing indices, update * the analyzer in the index template used by the stream. Then roll over the * data stream to apply the new analyzer to the stream's write index and future * backing indices. This affects searches and any new data added to the stream * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. * Refer to updating * analyzers on existing indices for step-by-step examples. * * @see Documentation * on elastic.co */ public CompletableFuture putSettings() { return this.transport.performRequestAsync(new PutIndicesSettingsRequest.Builder().build(), PutIndicesSettingsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.put_template /** * Create or update a legacy index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. * Elasticsearch applies templates to new indices based on an index pattern that * matches the index name. *

* IMPORTANT: This documentation is about legacy index templates, which are * deprecated and will be replaced by the composable templates introduced in * Elasticsearch 7.8. *

* Composable templates always take precedence over legacy templates. If no * composable template matches a new index, matching legacy templates are * applied according to their order. *

* Index templates are only applied during index creation. Changes to index * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. *

* You can use C-style /* *\/ block comments in index templates. * You can include comments anywhere in the request body, except before the * opening curly bracket. *

* Indices matching multiple templates *

* Multiple index templates can potentially match an index; in this case, both * the settings and mappings are merged into the final configuration of the * index. The order of the merging can be controlled using the order parameter, * with lower order being applied first, and higher orders overriding them. * NOTE: Multiple matching templates with the same order value will result in a * non-deterministic merging order. * * @see Documentation * on elastic.co */ public CompletableFuture putTemplate(PutTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) PutTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Create or update a legacy index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. * Elasticsearch applies templates to new indices based on an index pattern that * matches the index name. *
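* A legacy template with an explicit order might be sketched as follows (names are hypothetical; composable index templates are the preferred replacement):
*
	 * client.indices().putTemplate(t -> t
	 *         .name("legacy-logs-template")
	 *         .indexPatterns("logs-*")
	 *         .order(10));                // higher orders override lower ones
*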

* IMPORTANT: This documentation is about legacy index templates, which are * deprecated and will be replaced by the composable templates introduced in * Elasticsearch 7.8. *

* Composable templates always take precedence over legacy templates. If no * composable template matches a new index, matching legacy templates are * applied according to their order. *

* Index templates are only applied during index creation. Changes to index * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. *

* You can use C-style /* *\/ block comments in index templates. * You can include comments anywhere in the request body, except before the * opening curly bracket. *

* Indices matching multiple templates *

* Multiple index templates can potentially match an index; in this case, both * the settings and mappings are merged into the final configuration of the * index. The order of the merging can be controlled using the order parameter, * with lower order being applied first, and higher orders overriding them. * NOTE: Multiple matching templates with the same order value will result in a * non-deterministic merging order. * * @param fn * a function that initializes a builder to create the * {@link PutTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture putTemplate( Function> fn) { return putTemplate(fn.apply(new PutTemplateRequest.Builder()).build()); } // ----- Endpoint: indices.recovery /** * Get index recovery information. Get information about ongoing and completed * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

* All recoveries, whether ongoing or complete, are kept in the cluster state * and may be reported on at any time. *

* Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for * search and indexing. *

* Recovery automatically occurs during the following processes: *

  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.
*

* You can determine the cause of a shard recovery using the recovery or cat * recovery APIs. *
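* For instance, ongoing recoveries for an index could be inspected like this (a sketch; the index name is hypothetical):
*
	 * client.indices().recovery(r -> r
	 *         .index("my-index")
	 *         .activeOnly(true))          // only report ongoing recoveries
	 *     .thenAccept(resp -> System.out.println(resp.result().keySet()));
*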

* The index recovery API reports information about completed recoveries only * for shard copies that currently exist in the cluster. It only reports the * last recovery for each shard copy and does not report historical information * about earlier recoveries, nor does it report information about the recoveries * of shard copies that no longer exist. This means that if a shard copy * completes a recovery and then Elasticsearch relocates it onto a different * node, then the information about the original recovery will not be shown in * the recovery API. * * @see Documentation * on elastic.co */ public CompletableFuture recovery(RecoveryRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) RecoveryRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index recovery information. Get information about ongoing and completed * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

* All recoveries, whether ongoing or complete, are kept in the cluster state * and may be reported on at any time. *

* Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for * search and indexing. *

* Recovery automatically occurs during the following processes: *

  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.
*

* You can determine the cause of a shard recovery using the recovery or cat * recovery APIs. *

* The index recovery API reports information about completed recoveries only * for shard copies that currently exist in the cluster. It only reports the * last recovery for each shard copy and does not report historical information * about earlier recoveries, nor does it report information about the recoveries * of shard copies that no longer exist. This means that if a shard copy * completes a recovery and then Elasticsearch relocates it onto a different * node, then the information about the original recovery will not be shown in * the recovery API. * * @param fn * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation * on elastic.co */ public final CompletableFuture recovery( Function> fn) { return recovery(fn.apply(new RecoveryRequest.Builder()).build()); } /** * Get index recovery information. Get information about ongoing and completed * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

* All recoveries, whether ongoing or complete, are kept in the cluster state * and may be reported on at any time. *

* Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for * search and indexing. *

* Recovery automatically occurs during the following processes: *

  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.
*

* You can determine the cause of a shard recovery using the recovery or cat * recovery APIs. *

* The index recovery API reports information about completed recoveries only * for shard copies that currently exist in the cluster. It only reports the * last recovery for each shard copy and does not report historical information * about earlier recoveries, nor does it report information about the recoveries * of shard copies that no longer exist. This means that if a shard copy * completes a recovery and then Elasticsearch relocates it onto a different * node, then the information about the original recovery will not be shown in * the recovery API. * * @see Documentation * on elastic.co */ public CompletableFuture recovery() { return this.transport.performRequestAsync(new RecoveryRequest.Builder().build(), RecoveryRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.refresh /** * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. *

* By default, Elasticsearch periodically refreshes indices every second, but * only on indices that have received one search request or more in the last 30 * seconds. You can change this default interval with the * index.refresh_interval setting. *

* Refresh requests are synchronous and do not return a response until the * refresh operation completes. *

* Refreshes are resource-intensive. To ensure good cluster performance, it's * recommended to wait for Elasticsearch's periodic refresh rather than * performing an explicit refresh when possible. *
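* When an explicit refresh is genuinely needed, it can be issued as follows (a sketch; the index name is hypothetical):
*
	 * client.indices().refresh(r -> r
	 *         .index("my-index"))         // make recent writes searchable now
	 *     .thenAccept(resp -> System.out.println(resp.shards().successful()));
*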

* If your application workflow indexes documents and then runs a search to * retrieve the indexed document, it's recommended to use the index API's * refresh=wait_for query parameter option. This option ensures the * indexing operation waits for a periodic refresh before running the search. * * @see Documentation * on elastic.co */ public CompletableFuture refresh(RefreshRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) RefreshRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. *

* By default, Elasticsearch periodically refreshes indices every second, but * only on indices that have received one search request or more in the last 30 * seconds. You can change this default interval with the * index.refresh_interval setting. *

* Refresh requests are synchronous and do not return a response until the * refresh operation completes. *

* Refreshes are resource-intensive. To ensure good cluster performance, it's * recommended to wait for Elasticsearch's periodic refresh rather than * performing an explicit refresh when possible. *

* If your application workflow indexes documents and then runs a search to * retrieve the indexed document, it's recommended to use the index API's * refresh=wait_for query parameter option. This option ensures the * indexing operation waits for a periodic refresh before running the search. * * @param fn * a function that initializes a builder to create the * {@link RefreshRequest} * @see Documentation * on elastic.co */ public final CompletableFuture refresh( Function> fn) { return refresh(fn.apply(new RefreshRequest.Builder()).build()); } /** * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. *

* By default, Elasticsearch periodically refreshes indices every second, but * only on indices that have received one search request or more in the last 30 * seconds. You can change this default interval with the * index.refresh_interval setting. *

* Refresh requests are synchronous and do not return a response until the * refresh operation completes. *

* Refreshes are resource-intensive. To ensure good cluster performance, it's * recommended to wait for Elasticsearch's periodic refresh rather than * performing an explicit refresh when possible. *

* If your application workflow indexes documents and then runs a search to * retrieve the indexed document, it's recommended to use the index API's * refresh=wait_for query parameter option. This option ensures the * indexing operation waits for a periodic refresh before running the search. * * @see Documentation * on elastic.co */ public CompletableFuture refresh() { return this.transport.performRequestAsync(new RefreshRequest.Builder().build(), RefreshRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.reload_search_analyzers /** * Reload search analyzers. Reload an index's search analyzers and their * resources. For data streams, the API reloads search analyzers and resources * for the stream's backing indices. *

* IMPORTANT: After reloading the search analyzers you should clear the request * cache to make sure it doesn't contain responses derived from the previous * versions of the analyzer. *

* You can use the reload search analyzers API to pick up changes to synonym * files used in the synonym_graph or synonym token * filter of a search analyzer. To be eligible, the token filter must have an * updateable flag of true and only be used in search * analyzers. *
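* Once the updated synonym file is in place, the reload could be triggered like so (a sketch; the index name is hypothetical):
*
	 * client.indices().reloadSearchAnalyzers(r -> r
	 *         .index("my-index"));        // reloads updateable analyzers on each node
*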

* NOTE: This API does not perform a reload for each shard of an index. Instead, * it performs a reload for each node containing index shards. As a result, the * total shard count returned by the API can differ from the number of index * shards. Because reloading affects every node with an index shard, it is * important to update the synonym file on every data node in the * cluster--including nodes that don't contain a shard replica--before using * this API. This ensures the synonym file is updated everywhere in the cluster * in case shards are relocated in the future. * * @see Documentation * on elastic.co */ public CompletableFuture reloadSearchAnalyzers( ReloadSearchAnalyzersRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ReloadSearchAnalyzersRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Reload search analyzers. Reload an index's search analyzers and their * resources. For data streams, the API reloads search analyzers and resources * for the stream's backing indices. *

* IMPORTANT: After reloading the search analyzers you should clear the request * cache to make sure it doesn't contain responses derived from the previous * versions of the analyzer. *

* You can use the reload search analyzers API to pick up changes to synonym * files used in the synonym_graph or synonym token * filter of a search analyzer. To be eligible, the token filter must have an * updateable flag of true and only be used in search * analyzers. *

* NOTE: This API does not perform a reload for each shard of an index. Instead, * it performs a reload for each node containing index shards. As a result, the * total shard count returned by the API can differ from the number of index * shards. Because reloading affects every node with an index shard, it is * important to update the synonym file on every data node in the * cluster--including nodes that don't contain a shard replica--before using * this API. This ensures the synonym file is updated everywhere in the cluster * in case shards are relocated in the future. * * @param fn * a function that initializes a builder to create the * {@link ReloadSearchAnalyzersRequest} * @see Documentation * on elastic.co */ public final CompletableFuture reloadSearchAnalyzers( Function> fn) { return reloadSearchAnalyzers(fn.apply(new ReloadSearchAnalyzersRequest.Builder()).build()); } // ----- Endpoint: indices.resolve_cluster /** * Resolve the cluster. *

* Resolve the specified index expressions to return information about each * cluster, including the local "querying" cluster, if included. If no * index expression is provided, the API will return information about all the * remote clusters that are configured on the querying cluster. *

* This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. *

* You use the same index expression with this endpoint as you would for * cross-cluster search. Index and cluster exclusions are also supported with * this endpoint. *

* For each cluster in the index expression, information is returned about: *

  • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.

* For example, * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns * information about the local cluster and all remotely configured clusters that * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. *
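*
* A hedged sketch of the same lookup through this client, assuming an
* ElasticsearchIndicesAsyncClient instance named indices and that the
* response exposes its per-cluster map as result():
*
	 * indices.resolveCluster(r -> r.name("my-index-*", "cluster*:my-index-*"))
	 *     .thenAccept(resp -> resp.result().forEach((cluster, info) ->
	 *         System.out.println(cluster + " connected=" + info.connected())));
*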

* Note on backwards compatibility

* The ability to query without an index expression was added in version 8.18, * so when querying remote clusters older than that, the local cluster will send * the index expression dummy* to those remote clusters. Thus, if * any errors occur, you may see a reference to that index expression even though * you didn't request it. If it causes a problem, you can instead include an * index expression like *:* to bypass the issue. *

* Advantages of using this endpoint before a cross-cluster search

* You may want to exclude a cluster or index from a search when: *

  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases, or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases, or data streams that match logs*. In that case, that cluster returns no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.

* Test availability of remote clusters

* The remote/info endpoint is commonly used to test whether the * "local" cluster (the cluster being queried) is connected to its * remote clusters, but it does not necessarily reflect whether the remote * cluster is available or not. The remote cluster may be available, while the * local cluster is not currently connected to it. *
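*
* As the next paragraph describes, a _resolve/cluster call can be used to
* re-attempt those connections; a minimal check (illustrative only, with the
* same assumed result() accessor) might be:
*
	 * indices.resolveCluster()
	 *     .thenAccept(resp -> resp.result().forEach((cluster, info) -> {
	 *         if (Boolean.FALSE.equals(info.connected())) {
	 *             // still disconnected after the reconnection attempt
	 *         }
	 *     }));
*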

* You can use the _resolve/cluster API to attempt to reconnect to * remote clusters. For example with GET _resolve/cluster or * GET _resolve/cluster/*:*. The connected field in * the response will indicate whether it was successful. If a connection was * (re-)established, this will also cause the remote/info endpoint * to now indicate a connected status. * * @see Documentation * on elastic.co */ public CompletableFuture resolveCluster(ResolveClusterRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ResolveClusterRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Resolve the cluster. *

* Resolve the specified index expressions to return information about each * cluster, including the local "querying" cluster, if included. If no * index expression is provided, the API will return information about all the * remote clusters that are configured on the querying cluster. *

* This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. *

* You use the same index expression with this endpoint as you would for * cross-cluster search. Index and cluster exclusions are also supported with * this endpoint. *

* For each cluster in the index expression, information is returned about: *

  • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.

* For example, * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns * information about the local cluster and all remotely configured clusters that * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. *

* Note on backwards compatibility

* The ability to query without an index expression was added in version 8.18, * so when querying remote clusters older than that, the local cluster will send * the index expression dummy* to those remote clusters. Thus, if * any errors occur, you may see a reference to that index expression even though * you didn't request it. If it causes a problem, you can instead include an * index expression like *:* to bypass the issue. *

* Advantages of using this endpoint before a cross-cluster search

* You may want to exclude a cluster or index from a search when: *

  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases, or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases, or data streams that match logs*. In that case, that cluster returns no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.

* Test availability of remote clusters

* The remote/info endpoint is commonly used to test whether the * "local" cluster (the cluster being queried) is connected to its * remote clusters, but it does not necessarily reflect whether the remote * cluster is available or not. The remote cluster may be available, while the * local cluster is not currently connected to it. *

* You can use the _resolve/cluster API to attempt to reconnect to * remote clusters. For example with GET _resolve/cluster or * GET _resolve/cluster/*:*. The connected field in * the response will indicate whether it was successful. If a connection was * (re-)established, this will also cause the remote/info endpoint * to now indicate a connected status. * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} * @see Documentation * on elastic.co */ public final CompletableFuture resolveCluster( Function> fn) { return resolveCluster(fn.apply(new ResolveClusterRequest.Builder()).build()); } /** * Resolve the cluster. *

* Resolve the specified index expressions to return information about each * cluster, including the local "querying" cluster, if included. If no * index expression is provided, the API will return information about all the * remote clusters that are configured on the querying cluster. *

* This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. *

* You use the same index expression with this endpoint as you would for * cross-cluster search. Index and cluster exclusions are also supported with * this endpoint. *

* For each cluster in the index expression, information is returned about: *

  • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.

* For example, * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns * information about the local cluster and all remotely configured clusters that * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. *

* Note on backwards compatibility

* The ability to query without an index expression was added in version 8.18, * so when querying remote clusters older than that, the local cluster will send * the index expression dummy* to those remote clusters. Thus, if * any errors occur, you may see a reference to that index expression even though * you didn't request it. If it causes a problem, you can instead include an * index expression like *:* to bypass the issue. *

* Advantages of using this endpoint before a cross-cluster search

* You may want to exclude a cluster or index from a search when: *

  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases, or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases, or data streams that match logs*. In that case, that cluster returns no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.

* Test availability of remote clusters

* The remote/info endpoint is commonly used to test whether the * "local" cluster (the cluster being queried) is connected to its * remote clusters, but it does not necessarily reflect whether the remote * cluster is available or not. The remote cluster may be available, while the * local cluster is not currently connected to it. *

* You can use the _resolve/cluster API to attempt to reconnect to * remote clusters. For example with GET _resolve/cluster or * GET _resolve/cluster/*:*. The connected field in * the response will indicate whether it was successful. If a connection was * (re-)established, this will also cause the remote/info endpoint * to now indicate a connected status. * * @see Documentation * on elastic.co */ public CompletableFuture resolveCluster() { return this.transport.performRequestAsync(new ResolveClusterRequest.Builder().build(), ResolveClusterRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.resolve_index /** * Resolve indices. Resolve the names and/or index patterns for indices, * aliases, and data streams. Multiple patterns and remote clusters are * supported. * * @see Documentation * on elastic.co */ public CompletableFuture resolveIndex(ResolveIndexRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ResolveIndexRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Resolve indices. Resolve the names and/or index patterns for indices, * aliases, and data streams. Multiple patterns and remote clusters are * supported. * * @param fn * a function that initializes a builder to create the * {@link ResolveIndexRequest} * @see Documentation * on elastic.co */ public final CompletableFuture resolveIndex( Function> fn) { return resolveIndex(fn.apply(new ResolveIndexRequest.Builder()).build()); } // ----- Endpoint: indices.rollover /** * Roll over to a new index. TIP: It is recommended to use the index lifecycle * rollover action to automate rollovers. *

* The rollover API creates a new index for a data stream or index alias. The * API behavior depends on the rollover target. *

* Roll over a data stream *

* If you roll over a data stream, the API creates a new write index for the * stream. The stream's previous write index becomes a regular backing index. A * rollover also increments the data stream's generation. *

* Roll over an index alias with a write index *

* TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a * write index to manage time series data. Data streams replace this * functionality, require less maintenance, and automatically integrate with * data tiers. *

* If an index alias points to multiple indices, one of the indices must be a * write index. The rollover API creates a new write index for the alias with * is_write_index set to true. The API also * sets is_write_index to false for the previous write * index. *

* Roll over an index alias with one index *

* If you roll over an index alias that points to only one index, the API * creates a new index for the alias and removes the original index from the * alias. *

* NOTE: A rollover creates a new index and is subject to the * wait_for_active_shards setting. *

* Increment index names for an alias *

* When you roll over an index alias, you can specify a name for the new index. * If you don't specify a name and the current index ends with - * and a number, such as my-index-000001 or * my-index-3, the new index name increments that number. For * example, if you roll over an alias with a current index of * my-index-000001, the rollover creates a new index named * my-index-000002. This number is always six characters and * zero-padded, regardless of the previous index's name. *
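*
* For instance, a rollover that relies on this automatic increment could be
* sketched as follows (illustrative alias name; assumes the conditions
* builder exposes maxDocs and the response exposes oldIndex()/newIndex()):
*
	 * indices.rollover(r -> r
	 *     .alias("my-alias")
	 *     .conditions(c -> c.maxDocs(100_000L)))
	 *     .thenAccept(resp -> System.out.println(
	 *         resp.oldIndex() + " -> " + resp.newIndex()));
*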

* If you use an index alias for time series data, you can use date math in the * index name to track the rollover date. For example, you can create an alias * that points to an index named <my-index-{now/d}-000001>. * If you create the index on May 6, 2099, the index's name is * my-index-2099.05.06-000001. If you roll over the alias on May 7, * 2099, the new index's name is my-index-2099.05.07-000002. * * @see Documentation * on elastic.co */ public CompletableFuture rollover(RolloverRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) RolloverRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Roll over to a new index. TIP: It is recommended to use the index lifecycle * rollover action to automate rollovers. *

* The rollover API creates a new index for a data stream or index alias. The * API behavior depends on the rollover target. *

* Roll over a data stream *

* If you roll over a data stream, the API creates a new write index for the * stream. The stream's previous write index becomes a regular backing index. A * rollover also increments the data stream's generation. *

* Roll over an index alias with a write index *

* TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a * write index to manage time series data. Data streams replace this * functionality, require less maintenance, and automatically integrate with * data tiers. *

* If an index alias points to multiple indices, one of the indices must be a * write index. The rollover API creates a new write index for the alias with * is_write_index set to true. The API also * sets is_write_index to false for the previous write * index. *

* Roll over an index alias with one index *

* If you roll over an index alias that points to only one index, the API * creates a new index for the alias and removes the original index from the * alias. *

* NOTE: A rollover creates a new index and is subject to the * wait_for_active_shards setting. *

* Increment index names for an alias *

* When you roll over an index alias, you can specify a name for the new index. * If you don't specify a name and the current index ends with - * and a number, such as my-index-000001 or * my-index-3, the new index name increments that number. For * example, if you roll over an alias with a current index of * my-index-000001, the rollover creates a new index named * my-index-000002. This number is always six characters and * zero-padded, regardless of the previous index's name. *

* If you use an index alias for time series data, you can use date math in the * index name to track the rollover date. For example, you can create an alias * that points to an index named <my-index-{now/d}-000001>. * If you create the index on May 6, 2099, the index's name is * my-index-2099.05.06-000001. If you roll over the alias on May 7, * 2099, the new index's name is my-index-2099.05.07-000002. * * @param fn * a function that initializes a builder to create the * {@link RolloverRequest} * @see Documentation * on elastic.co */ public final CompletableFuture rollover( Function> fn) { return rollover(fn.apply(new RolloverRequest.Builder()).build()); } // ----- Endpoint: indices.segments /** * Get index segments. Get low-level information about the Lucene segments in * index shards. For data streams, the API returns information about the * stream's backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture segments(SegmentsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) SegmentsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index segments. Get low-level information about the Lucene segments in * index shards. For data streams, the API returns information about the * stream's backing indices. * * @param fn * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture segments( Function> fn) { return segments(fn.apply(new SegmentsRequest.Builder()).build()); } /** * Get index segments. Get low-level information about the Lucene segments in * index shards. For data streams, the API returns information about the * stream's backing indices. * * @see Documentation * on elastic.co */ public CompletableFuture segments() { return this.transport.performRequestAsync(new SegmentsRequest.Builder().build(), SegmentsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.shard_stores /** * Get index shard stores. Get store information about replica shards in one or * more indices. For data streams, the API retrieves store information for the * stream's backing indices. *

* The index shard stores API returns the following information: *

  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.
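*
* A minimal call through this client (index name illustrative; assumes the
* response exposes a per-index map as indices()):
*
	 * indices.shardStores(s -> s.index("my-index"))
	 *     .thenAccept(resp -> System.out.println(resp.indices().keySet()));
*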

* By default, the API returns store information only for primary shards that * are unassigned or have one or more unassigned replica shards. * * @see Documentation * on elastic.co */ public CompletableFuture shardStores(ShardStoresRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ShardStoresRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index shard stores. Get store information about replica shards in one or * more indices. For data streams, the API retrieves store information for the * stream's backing indices. *

* The index shard stores API returns the following information: *

  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.

* By default, the API returns store information only for primary shards that * are unassigned or have one or more unassigned replica shards. * * @param fn * a function that initializes a builder to create the * {@link ShardStoresRequest} * @see Documentation * on elastic.co */ public final CompletableFuture shardStores( Function> fn) { return shardStores(fn.apply(new ShardStoresRequest.Builder()).build()); } /** * Get index shard stores. Get store information about replica shards in one or * more indices. For data streams, the API retrieves store information for the * stream's backing indices. *

* The index shard stores API returns the following information: *

  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.

* By default, the API returns store information only for primary shards that * are unassigned or have one or more unassigned replica shards. * * @see Documentation * on elastic.co */ public CompletableFuture shardStores() { return this.transport.performRequestAsync(new ShardStoresRequest.Builder().build(), ShardStoresRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.shrink /** * Shrink an index. Shrink an index into a new index with fewer primary shards. *

* Before you can shrink an index: *

  • The index must be read-only.
  • A copy of every shard in the index must reside on the same node.
  • The index must have a green health status.

* To make shard allocation easier, we recommend you also remove the index's * replica shards. You can later re-add replica shards as part of the shrink * operation. *

* The requested number of primary shards in the target index must be a factor * of the number of shards in the source index. For example, an index with 8 * primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with * 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in * the index is a prime number, it can only be shrunk into a single primary shard. * Before shrinking, a (primary or replica) copy of every shard in the index * must be present on the same node. *
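*
* A hedged sketch of such a shrink (names illustrative; assumes settings are
* passed as co.elastic.clients.json.JsonData values):
*
	 * indices.shrink(s -> s
	 *     .index("my_source_index")
	 *     .target("my_target_index")
	 *     .settings("index.number_of_shards", JsonData.of(1)))
	 *     .thenAccept(resp -> System.out.println(
	 *         "acknowledged: " + resp.acknowledged()));
*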

* The current write index on a data stream cannot be shrunk. In order to shrink * the current write index, the data stream must first be rolled over so that a * new write index is created and then the previous write index can be shrunk. *

* A shrink operation: *

  • Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process. Also, if you use multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
  • Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the index.routing.allocation.initial_recovery._id index setting.

* IMPORTANT: Indices can only be shrunk if they satisfy the following * requirements: *

  • The target index must not exist.
  • The source index must have more primary shards than the target index.
  • The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
  • The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
  • The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
* * @see Documentation * on elastic.co */ public CompletableFuture shrink(ShrinkRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ShrinkRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Shrink an index. Shrink an index into a new index with fewer primary shards. *

* Before you can shrink an index: *

  • The index must be read-only.
  • A copy of every shard in the index must reside on the same node.
  • The index must have a green health status.

* To make shard allocation easier, we recommend you also remove the index's * replica shards. You can later re-add replica shards as part of the shrink * operation. *

* The requested number of primary shards in the target index must be a factor * of the number of shards in the source index. For example, an index with 8 * primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with * 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in * the index is a prime number, it can only be shrunk into a single primary shard. * Before shrinking, a (primary or replica) copy of every shard in the index * must be present on the same node. *

* The current write index on a data stream cannot be shrunk. In order to shrink * the current write index, the data stream must first be rolled over so that a * new write index is created and then the previous write index can be shrunk. *

* A shrink operation: *

  • Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process. Also, if you use multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
  • Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the index.routing.allocation.initial_recovery._id index setting.

* IMPORTANT: Indices can only be shrunk if they satisfy the following * requirements: *

  • The target index must not exist.
  • The source index must have more primary shards than the target index.
  • The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
  • The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
  • The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
* * @param fn * a function that initializes a builder to create the * {@link ShrinkRequest} * @see Documentation * on elastic.co */ public final CompletableFuture shrink( Function> fn) { return shrink(fn.apply(new ShrinkRequest.Builder()).build()); } // ----- Endpoint: indices.simulate_index_template /** * Simulate an index. Get the index configuration that would be applied to the * specified index from an existing index template. * * @see Documentation * on elastic.co */ public CompletableFuture simulateIndexTemplate( SimulateIndexTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) SimulateIndexTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Simulate an index. Get the index configuration that would be applied to the * specified index from an existing index template. * * @param fn * a function that initializes a builder to create the * {@link SimulateIndexTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture simulateIndexTemplate( Function> fn) { return simulateIndexTemplate(fn.apply(new SimulateIndexTemplateRequest.Builder()).build()); } // ----- Endpoint: indices.simulate_template /** * Simulate an index template. Get the index configuration that would be applied * by a particular index template. * * @see Documentation * on elastic.co */ public CompletableFuture simulateTemplate(SimulateTemplateRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) SimulateTemplateRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Simulate an index template. Get the index configuration that would be applied * by a particular index template. * * @param fn * a function that initializes a builder to create the * {@link SimulateTemplateRequest} * @see Documentation * on elastic.co */ public final CompletableFuture simulateTemplate( Function> fn) { return simulateTemplate(fn.apply(new SimulateTemplateRequest.Builder()).build()); } /** * Simulate an index template. Get the index configuration that would be applied * by a particular index template. * * @see Documentation * on elastic.co */ public CompletableFuture simulateTemplate() { return this.transport.performRequestAsync(new SimulateTemplateRequest.Builder().build(), SimulateTemplateRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.split /** * Split an index. Split an index into a new index with more primary shards. *

* Before you can split an index: *

  • The index must be read-only.
  • The cluster health status must be green.

* You can make an index read-only with the following request using the add * index block API: *

	 * PUT /my_source_index/_block/write
	 * 
	 * 
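* The same block applied through this client might look like the following
* sketch (the IndicesBlockOptions enum name is an assumption, not taken from
* this file):
*
	 * indices.addBlock(b -> b
	 *     .index("my_source_index")
	 *     .block(IndicesBlockOptions.Write));
*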
*

* The current write index on a data stream cannot be split. In order to split * the current write index, the data stream must first be rolled over so that a * new write index is created and then the previous write index can be split. *

* The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing * shards specifies the hashing space that is used internally to distribute * documents across shards with consistent hashing. For instance, a 5 shard * index with number_of_routing_shards set to 30 (5 x 2 x 3) could * be split by a factor of 2 or 3. *
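*
* For example, splitting such a 5-shard index into 10 primary shards could be
* sketched as (names illustrative; assumes settings are passed as
* co.elastic.clients.json.JsonData values):
*
	 * indices.split(s -> s
	 *     .index("my_source_index")
	 *     .target("my_target_index")
	 *     .settings("index.number_of_shards", JsonData.of(10)))
	 *     .thenAccept(resp -> System.out.println(
	 *         "acknowledged: " + resp.acknowledged()));
*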

* A split operation: *

  • Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
  • Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
  • Recovers the target index as though it were a closed index which had just been re-opened.

* IMPORTANT: Indices can only be split if they satisfy the following * requirements: *

  • The target index must not exist.
  • The source index must have fewer primary shards than the target index.
  • The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
  • The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
* * @see Documentation * on elastic.co */ public CompletableFuture split(SplitRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) SplitRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Split an index. Split an index into a new index with more primary shards. *

* Before you can split an index: *

  • The index must be read-only.
  • The cluster health status must be green.

* You can make an index read-only with the following request using the add * index block API: *

	 * PUT /my_source_index/_block/write
	 * 
	 * 
*

* The current write index on a data stream cannot be split. In order to split * the current write index, the data stream must first be rolled over so that a * new write index is created and then the previous write index can be split. *

* The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing * shards specifies the hashing space that is used internally to distribute * documents across shards with consistent hashing. For instance, a 5 shard * index with number_of_routing_shards set to 30 (5 x 2 x 3) could * be split by a factor of 2 or 3. *

* A split operation: *

  • Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
  • Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
  • Recovers the target index as though it were a closed index which had just been re-opened.

* IMPORTANT: Indices can only be split if they satisfy the following * requirements: *

  • The target index must not exist.
  • The source index must have fewer primary shards than the target index.
  • The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
  • The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
* * @param fn * a function that initializes a builder to create the * {@link SplitRequest} * @see Documentation * on elastic.co */ public final CompletableFuture split( Function> fn) { return split(fn.apply(new SplitRequest.Builder()).build()); } // ----- Endpoint: indices.stats /** * Get index statistics. For data streams, the API retrieves statistics for the * stream's backing indices. *

* By default, the returned statistics are index-level with * primaries and total aggregations. * primaries are the values for only the primary shards. * total are the accumulated values for both primary and replica * shards. *

* To get shard-level statistics, set the level parameter to * shards. *
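*
* For example (illustrative; assumes the shared _types Level enum and a
* per-index indices() map on the response):
*
	 * indices.stats(s -> s
	 *     .index("my-index")
	 *     .level(Level.Shards))   // shard-level statistics
	 *     .thenAccept(resp -> System.out.println(resp.indices().keySet()));
*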

* NOTE: When moving to another node, the shard-level statistics for a shard are * cleared. Although the shard is no longer part of the node, that node retains * any node-level statistics to which the shard contributed. * * @see Documentation * on elastic.co */ public CompletableFuture stats(IndicesStatsRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) IndicesStatsRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Get index statistics. For data streams, the API retrieves statistics for the * stream's backing indices. *

* By default, the returned statistics are index-level with * primaries and total aggregations. * primaries are the values for only the primary shards. * total are the accumulated values for both primary and replica * shards. *

* To get shard-level statistics, set the level parameter to * shards. *

* NOTE: When moving to another node, the shard-level statistics for a shard are * cleared. Although the shard is no longer part of the node, that node retains * any node-level statistics to which the shard contributed. * * @param fn * a function that initializes a builder to create the * {@link IndicesStatsRequest} * @see Documentation * on elastic.co */ public final CompletableFuture stats( Function> fn) { return stats(fn.apply(new IndicesStatsRequest.Builder()).build()); } /** * Get index statistics. For data streams, the API retrieves statistics for the * stream's backing indices. *

* By default, the returned statistics are index-level with * primaries and total aggregations. * primaries are the values for only the primary shards. * total are the accumulated values for both primary and replica * shards. *

* To get shard-level statistics, set the level parameter to * shards. *

* NOTE: When moving to another node, the shard-level statistics for a shard are * cleared. Although the shard is no longer part of the node, that node retains * any node-level statistics to which the shard contributed. * * @see Documentation * on elastic.co */ public CompletableFuture stats() { return this.transport.performRequestAsync(new IndicesStatsRequest.Builder().build(), IndicesStatsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.update_aliases /** * Create or update an alias. Adds a data stream or index to an alias. * * @see Documentation * on elastic.co */ public CompletableFuture updateAliases(UpdateAliasesRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) UpdateAliasesRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Create or update an alias. Adds a data stream or index to an alias. * * @param fn * a function that initializes a builder to create the * {@link UpdateAliasesRequest} * @see Documentation * on elastic.co */ public final CompletableFuture updateAliases( Function> fn) { return updateAliases(fn.apply(new UpdateAliasesRequest.Builder()).build()); } /** * Create or update an alias. Adds a data stream or index to an alias. * * @see Documentation * on elastic.co */ public CompletableFuture updateAliases() { return this.transport.performRequestAsync(new UpdateAliasesRequest.Builder().build(), UpdateAliasesRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: indices.validate_query /** * Validate a query. Validates a query without running it. * * @see Documentation * on elastic.co */ public CompletableFuture validateQuery(ValidateQueryRequest request) { @SuppressWarnings("unchecked") JsonEndpoint endpoint = (JsonEndpoint) ValidateQueryRequest._ENDPOINT; return this.transport.performRequestAsync(request, endpoint, this.transportOptions); } /** * Validate a query. Validates a query without running it. * * @param fn * a function that initializes a builder to create the * {@link ValidateQueryRequest} * @see Documentation * on elastic.co */ public final CompletableFuture validateQuery( Function> fn) { return validateQuery(fn.apply(new ValidateQueryRequest.Builder()).build()); } /** * Validate a query. Validates a query without running it. * * @see Documentation * on elastic.co */ public CompletableFuture validateQuery() { return this.transport.performRequestAsync(new ValidateQueryRequest.Builder().build(), ValidateQueryRequest._ENDPOINT, this.transportOptions); } }




