
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package co.elastic.clients.elasticsearch.indices;
import co.elastic.clients.ApiClient;
import co.elastic.clients.elasticsearch._types.ElasticsearchException;
import co.elastic.clients.elasticsearch._types.ErrorResponse;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.Endpoint;
import co.elastic.clients.transport.JsonEndpoint;
import co.elastic.clients.transport.Transport;
import co.elastic.clients.transport.TransportOptions;
import co.elastic.clients.transport.endpoints.BooleanResponse;
import co.elastic.clients.util.ObjectBuilder;
import java.io.IOException;
import java.util.function.Function;
import javax.annotation.Nullable;
//----------------------------------------------------------------
// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------
/**
* Client for the indices namespace.
*/
public class ElasticsearchIndicesClient extends ApiClient {
public ElasticsearchIndicesClient(ElasticsearchTransport transport) {
super(transport, null);
}
public ElasticsearchIndicesClient(ElasticsearchTransport transport, @Nullable TransportOptions transportOptions) {
super(transport, transportOptions);
}
@Override
public ElasticsearchIndicesClient withTransportOptions(@Nullable TransportOptions transportOptions) {
return new ElasticsearchIndicesClient(this.transport, transportOptions);
}
// ----- Endpoint: indices.add_block
/**
* Add an index block.
*
* Add an index block to an index. Index blocks limit the operations allowed on
* an index by blocking specific operation types.
*
* @see Documentation
* on elastic.co
*/
public AddBlockResponse addBlock(AddBlockRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<AddBlockRequest, AddBlockResponse, ErrorResponse> endpoint = (JsonEndpoint<AddBlockRequest, AddBlockResponse, ErrorResponse>) AddBlockRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Add an index block.
*
* Add an index block to an index. Index blocks limit the operations allowed on
* an index by blocking specific operation types.
*
* @param fn
* a function that initializes a builder to create the
* {@link AddBlockRequest}
* @see Documentation
* on elastic.co
*/
public final AddBlockResponse addBlock(Function<AddBlockRequest.Builder, ObjectBuilder<AddBlockRequest>> fn)
throws IOException, ElasticsearchException {
return addBlock(fn.apply(new AddBlockRequest.Builder()).build());
}
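// Usage sketch (illustrative, not part of the generated code): adding a write
// block via the builder-lambda variant, assuming an ElasticsearchClient named
// "esClient" and an index "my-index". The IndicesBlockOptions enum name is an
// assumption based on the request type.
//
// AddBlockResponse blocked = esClient.indices().addBlock(b -> b
//     .index("my-index")
//     .block(IndicesBlockOptions.Write));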
// ----- Endpoint: indices.analyze
/**
* Get tokens from text analysis. The analyze API performs analysis on a text
* string and returns the resulting tokens.
*
* Generating an excessive amount of tokens may cause a node to run out of
* memory. The <code>index.analyze.max_token_count</code> setting enables you to
* limit the number of tokens that can be produced. If more than this limit of
* tokens gets generated, an error occurs. The <code>_analyze</code> endpoint
* without a specified index will always use <code>10000</code> as its limit.
*
* @see Documentation
* on elastic.co
*/
public AnalyzeResponse analyze(AnalyzeRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<AnalyzeRequest, AnalyzeResponse, ErrorResponse> endpoint = (JsonEndpoint<AnalyzeRequest, AnalyzeResponse, ErrorResponse>) AnalyzeRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get tokens from text analysis. The analyze API performs analysis on a text
* string and returns the resulting tokens.
*
* Generating an excessive amount of tokens may cause a node to run out of
* memory. The <code>index.analyze.max_token_count</code> setting enables you to
* limit the number of tokens that can be produced. If more than this limit of
* tokens gets generated, an error occurs. The <code>_analyze</code> endpoint
* without a specified index will always use <code>10000</code> as its limit.
*
* @param fn
* a function that initializes a builder to create the
* {@link AnalyzeRequest}
* @see Documentation
* on elastic.co
*/
public final AnalyzeResponse analyze(Function<AnalyzeRequest.Builder, ObjectBuilder<AnalyzeRequest>> fn)
throws IOException, ElasticsearchException {
return analyze(fn.apply(new AnalyzeRequest.Builder()).build());
}
/**
* Get tokens from text analysis. The analyze API performs analysis on a text
* string and returns the resulting tokens.
*
* Generating an excessive amount of tokens may cause a node to run out of
* memory. The <code>index.analyze.max_token_count</code> setting enables you to
* limit the number of tokens that can be produced. If more than this limit of
* tokens gets generated, an error occurs. The <code>_analyze</code> endpoint
* without a specified index will always use <code>10000</code> as its limit.
*
* @see Documentation
* on elastic.co
*/
public AnalyzeResponse analyze() throws IOException, ElasticsearchException {
return this.transport.performRequest(new AnalyzeRequest.Builder().build(), AnalyzeRequest._ENDPOINT,
this.transportOptions);
}
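// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): running the standard analyzer over a sample string and printing
// the resulting tokens.
//
// AnalyzeResponse analyzed = esClient.indices().analyze(a -> a
//     .analyzer("standard")
//     .text("Quick brown fox"));
// analyzed.tokens().forEach(t -> System.out.println(t.token()));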
// ----- Endpoint: indices.cancel_migrate_reindex
/**
* Cancel a migration reindex operation.
*
* Cancel a migration reindex attempt for a data stream or index.
*
* @see Documentation
* on elastic.co
*/
public CancelMigrateReindexResponse cancelMigrateReindex(CancelMigrateReindexRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CancelMigrateReindexRequest, CancelMigrateReindexResponse, ErrorResponse> endpoint = (JsonEndpoint<CancelMigrateReindexRequest, CancelMigrateReindexResponse, ErrorResponse>) CancelMigrateReindexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Cancel a migration reindex operation.
*
* Cancel a migration reindex attempt for a data stream or index.
*
* @param fn
* a function that initializes a builder to create the
* {@link CancelMigrateReindexRequest}
* @see Documentation
* on elastic.co
*/
public final CancelMigrateReindexResponse cancelMigrateReindex(
Function<CancelMigrateReindexRequest.Builder, ObjectBuilder<CancelMigrateReindexRequest>> fn)
throws IOException, ElasticsearchException {
return cancelMigrateReindex(fn.apply(new CancelMigrateReindexRequest.Builder()).build());
}
// ----- Endpoint: indices.clear_cache
/**
* Clear the cache. Clear the cache of one or more indices. For data streams,
* the API clears the caches of the stream's backing indices.
*
* By default, the clear cache API clears all caches. To clear only specific
* caches, use the <code>fielddata</code>, <code>query</code>, or
* <code>request</code> parameters. To clear the cache only of specific fields,
* use the <code>fields</code> parameter.
*
* @see Documentation
* on elastic.co
*/
public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ClearCacheRequest, ClearCacheResponse, ErrorResponse> endpoint = (JsonEndpoint<ClearCacheRequest, ClearCacheResponse, ErrorResponse>) ClearCacheRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Clear the cache. Clear the cache of one or more indices. For data streams,
* the API clears the caches of the stream's backing indices.
*
* By default, the clear cache API clears all caches. To clear only specific
* caches, use the <code>fielddata</code>, <code>query</code>, or
* <code>request</code> parameters. To clear the cache only of specific fields,
* use the <code>fields</code> parameter.
*
* @param fn
* a function that initializes a builder to create the
* {@link ClearCacheRequest}
* @see Documentation
* on elastic.co
*/
public final ClearCacheResponse clearCache(Function<ClearCacheRequest.Builder, ObjectBuilder<ClearCacheRequest>> fn)
throws IOException, ElasticsearchException {
return clearCache(fn.apply(new ClearCacheRequest.Builder()).build());
}
/**
* Clear the cache. Clear the cache of one or more indices. For data streams,
* the API clears the caches of the stream's backing indices.
*
* By default, the clear cache API clears all caches. To clear only specific
* caches, use the <code>fielddata</code>, <code>query</code>, or
* <code>request</code> parameters. To clear the cache only of specific fields,
* use the <code>fields</code> parameter.
*
* @see Documentation
* on elastic.co
*/
public ClearCacheResponse clearCache() throws IOException, ElasticsearchException {
return this.transport.performRequest(new ClearCacheRequest.Builder().build(), ClearCacheRequest._ENDPOINT,
this.transportOptions);
}
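// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): clearing only the fielddata and query caches of a hypothetical
// index "my-index" rather than all caches.
//
// ClearCacheResponse cleared = esClient.indices().clearCache(c -> c
//     .index("my-index")
//     .fielddata(true)
//     .query(true));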
// ----- Endpoint: indices.clone
/**
* Clone an index. Clone an existing index into a new index. Each original
* primary shard is cloned into a new primary shard in the new index.
*
* IMPORTANT: Elasticsearch does not apply index templates to the resulting
* index. The API also does not copy index metadata from the original index.
* Index metadata includes aliases, index lifecycle management phase
* definitions, and cross-cluster replication (CCR) follower information. For
* example, if you clone a CCR follower index, the resulting clone will not be a
* follower index.
*
* The clone API copies most index settings from the source index to the
* resulting index, with the exception of <code>index.number_of_replicas</code>
* and <code>index.auto_expand_replicas</code>. To set the number of replicas in
* the resulting index, configure these settings in the clone request.
*
* Cloning works as follows:
*
* - First, it creates a new target index with the same definition as the
* source index.
* - Then it hard-links segments from the source index into the target index.
* If the file system does not support hard-linking, all segments are copied
* into the new index, which is a much more time consuming process.
* - Finally, it recovers the target index as though it were a closed index
* which had just been re-opened.
*
*
* IMPORTANT: Indices can only be cloned if they meet the following
* requirements:
*
* - The index must be marked as read-only and have a cluster health status of
* green.
* - The target index must not exist.
* - The source index must have the same number of primary shards as the
* target index.
* - The node handling the clone process must have sufficient free disk space
* to accommodate a second copy of the existing index.
*
*
* The current write index on a data stream cannot be cloned. In order to clone
* the current write index, the data stream must first be rolled over so that a
* new write index is created and then the previous write index can be cloned.
*
* NOTE: Mappings cannot be specified in the <code>_clone</code> request. The
* mappings of the source index will be used for the target index.
*
* Monitor the cloning process
*
* The cloning process can be monitored with the cat recovery API, or the
* cluster health API can be used to wait until all primary shards have been
* allocated by setting the <code>wait_for_status</code> parameter to
* <code>yellow</code>.
*
* The <code>_clone</code> API returns as soon as the target index has been
* added to the cluster state, before any shards have been allocated. At this
* point, all shards are in the state unassigned. If, for any reason, the target
* index can't be allocated, its primary shard will remain unassigned until it
* can be allocated on that node.
*
* Once the primary shard is allocated, it moves to state initializing, and the
* clone process begins. When the clone operation completes, the shard will
* become active. At that point, Elasticsearch will try to allocate any replicas
* and may decide to relocate the primary shard to another node.
*
* Wait for active shards
*
* Because the clone operation creates a new index to clone the shards to, the
* wait for active shards setting on index creation applies to the clone index
* action as well.
*
* @see Documentation
* on elastic.co
*/
public CloneIndexResponse clone(CloneIndexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CloneIndexRequest, CloneIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<CloneIndexRequest, CloneIndexResponse, ErrorResponse>) CloneIndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Clone an index. Clone an existing index into a new index. Each original
* primary shard is cloned into a new primary shard in the new index.
*
* IMPORTANT: Elasticsearch does not apply index templates to the resulting
* index. The API also does not copy index metadata from the original index.
* Index metadata includes aliases, index lifecycle management phase
* definitions, and cross-cluster replication (CCR) follower information. For
* example, if you clone a CCR follower index, the resulting clone will not be a
* follower index.
*
* The clone API copies most index settings from the source index to the
* resulting index, with the exception of <code>index.number_of_replicas</code>
* and <code>index.auto_expand_replicas</code>. To set the number of replicas in
* the resulting index, configure these settings in the clone request.
*
* Cloning works as follows:
*
* - First, it creates a new target index with the same definition as the
* source index.
* - Then it hard-links segments from the source index into the target index.
* If the file system does not support hard-linking, all segments are copied
* into the new index, which is a much more time consuming process.
* - Finally, it recovers the target index as though it were a closed index
* which had just been re-opened.
*
*
* IMPORTANT: Indices can only be cloned if they meet the following
* requirements:
*
* - The index must be marked as read-only and have a cluster health status of
* green.
* - The target index must not exist.
* - The source index must have the same number of primary shards as the
* target index.
* - The node handling the clone process must have sufficient free disk space
* to accommodate a second copy of the existing index.
*
*
* The current write index on a data stream cannot be cloned. In order to clone
* the current write index, the data stream must first be rolled over so that a
* new write index is created and then the previous write index can be cloned.
*
* NOTE: Mappings cannot be specified in the <code>_clone</code> request. The
* mappings of the source index will be used for the target index.
*
* Monitor the cloning process
*
* The cloning process can be monitored with the cat recovery API, or the
* cluster health API can be used to wait until all primary shards have been
* allocated by setting the <code>wait_for_status</code> parameter to
* <code>yellow</code>.
*
* The <code>_clone</code> API returns as soon as the target index has been
* added to the cluster state, before any shards have been allocated. At this
* point, all shards are in the state unassigned. If, for any reason, the target
* index can't be allocated, its primary shard will remain unassigned until it
* can be allocated on that node.
*
* Once the primary shard is allocated, it moves to state initializing, and the
* clone process begins. When the clone operation completes, the shard will
* become active. At that point, Elasticsearch will try to allocate any replicas
* and may decide to relocate the primary shard to another node.
*
* Wait for active shards
*
* Because the clone operation creates a new index to clone the shards to, the
* wait for active shards setting on index creation applies to the clone index
* action as well.
*
* @param fn
* a function that initializes a builder to create the
* {@link CloneIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CloneIndexResponse clone(Function<CloneIndexRequest.Builder, ObjectBuilder<CloneIndexRequest>> fn)
throws IOException, ElasticsearchException {
return clone(fn.apply(new CloneIndexRequest.Builder()).build());
}
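// Usage sketch (illustrative, assuming an ElasticsearchClient named "esClient"
// and a source index "my-index"): the source is made read-only with a write
// block first, then cloned. The IndicesBlockOptions enum name is an assumption.
//
// esClient.indices().addBlock(b -> b.index("my-index").block(IndicesBlockOptions.Write));
// CloneIndexResponse cloned = esClient.indices().clone(c -> c
//     .index("my-index")
//     .target("my-index-clone"));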
// ----- Endpoint: indices.close
/**
* Close an index. A closed index is blocked for read or write operations and
* does not allow all operations that opened indices allow. It is not possible
* to index documents or to search for documents in a closed index. Closed
* indices do not have to maintain internal data structures for indexing or
* searching documents, which results in a smaller overhead on the cluster.
*
* When opening or closing an index, the master node is responsible for
* restarting the index shards to reflect the new state of the index. The shards
* will then go through the normal recovery process. The data of opened and
* closed indices is automatically replicated by the cluster to ensure that
* enough shard copies are safely kept around at all times.
*
* You can open and close multiple indices. An error is thrown if the request
* explicitly refers to a missing index. This behaviour can be turned off using
* the <code>ignore_unavailable=true</code> parameter.
*
* By default, you must explicitly name the indices you are opening or closing.
* To open or close indices with <code>_all</code>, <code>*</code>, or other
* wildcard expressions, change the <code>action.destructive_requires_name</code>
* setting to <code>false</code>. This setting can also be changed with the
* cluster update settings API.
*
* Closed indices consume a significant amount of disk space, which can cause
* problems in managed environments. Closing indices can be turned off with the
* cluster settings API by setting <code>cluster.indices.close.enable</code> to
* <code>false</code>.
*
* @see Documentation
* on elastic.co
*/
public CloseIndexResponse close(CloseIndexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CloseIndexRequest, CloseIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<CloseIndexRequest, CloseIndexResponse, ErrorResponse>) CloseIndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Close an index. A closed index is blocked for read or write operations and
* does not allow all operations that opened indices allow. It is not possible
* to index documents or to search for documents in a closed index. Closed
* indices do not have to maintain internal data structures for indexing or
* searching documents, which results in a smaller overhead on the cluster.
*
* When opening or closing an index, the master node is responsible for
* restarting the index shards to reflect the new state of the index. The shards
* will then go through the normal recovery process. The data of opened and
* closed indices is automatically replicated by the cluster to ensure that
* enough shard copies are safely kept around at all times.
*
* You can open and close multiple indices. An error is thrown if the request
* explicitly refers to a missing index. This behaviour can be turned off using
* the <code>ignore_unavailable=true</code> parameter.
*
* By default, you must explicitly name the indices you are opening or closing.
* To open or close indices with <code>_all</code>, <code>*</code>, or other
* wildcard expressions, change the <code>action.destructive_requires_name</code>
* setting to <code>false</code>. This setting can also be changed with the
* cluster update settings API.
*
* Closed indices consume a significant amount of disk space, which can cause
* problems in managed environments. Closing indices can be turned off with the
* cluster settings API by setting <code>cluster.indices.close.enable</code> to
* <code>false</code>.
*
* @param fn
* a function that initializes a builder to create the
* {@link CloseIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CloseIndexResponse close(Function<CloseIndexRequest.Builder, ObjectBuilder<CloseIndexRequest>> fn)
throws IOException, ElasticsearchException {
return close(fn.apply(new CloseIndexRequest.Builder()).build());
}
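// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): closing a single, explicitly named index "my-index".
//
// CloseIndexResponse closed = esClient.indices().close(c -> c.index("my-index"));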
// ----- Endpoint: indices.create
/**
* Create an index. You can use the create index API to add a new index to an
* Elasticsearch cluster. When creating an index, you can specify the following:
*
* - Settings for the index.
* - Mappings for fields in the index.
* - Index aliases
*
*
* Wait for active shards
*
* By default, index creation will only return a response to the client when the
* primary copies of each shard have been started, or the request times out. The
* index creation response will indicate what happened. For example,
* <code>acknowledged</code> indicates whether the index was successfully
* created in the cluster, while <code>shards_acknowledged</code> indicates
* whether the requisite number of shard copies were started for each shard in
* the index before timing out. Note that it is still possible for either
* <code>acknowledged</code> or <code>shards_acknowledged</code> to be
* <code>false</code>, but for the index creation to be successful. These values
* simply indicate whether the operation completed before the timeout. If
* <code>acknowledged</code> is false, the request timed out before the cluster
* state was updated with the newly created index, but it probably will be
* created sometime soon. If <code>shards_acknowledged</code> is false, then the
* request timed out before the requisite number of shards were started (by
* default just the primaries), even if the cluster state was successfully
* updated to reflect the newly created index (that is to say,
* <code>acknowledged</code> is <code>true</code>).
*
* You can change the default of only waiting for the primary shards to start
* through the index setting <code>index.write.wait_for_active_shards</code>.
* Note that changing this setting will also affect the
* <code>wait_for_active_shards</code> value on all subsequent write operations.
*
* @see Documentation
* on elastic.co
*/
public CreateIndexResponse create(CreateIndexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CreateIndexRequest, CreateIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateIndexRequest, CreateIndexResponse, ErrorResponse>) CreateIndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create an index. You can use the create index API to add a new index to an
* Elasticsearch cluster. When creating an index, you can specify the following:
*
* - Settings for the index.
* - Mappings for fields in the index.
* - Index aliases
*
*
* Wait for active shards
*
* By default, index creation will only return a response to the client when the
* primary copies of each shard have been started, or the request times out. The
* index creation response will indicate what happened. For example,
* <code>acknowledged</code> indicates whether the index was successfully
* created in the cluster, while <code>shards_acknowledged</code> indicates
* whether the requisite number of shard copies were started for each shard in
* the index before timing out. Note that it is still possible for either
* <code>acknowledged</code> or <code>shards_acknowledged</code> to be
* <code>false</code>, but for the index creation to be successful. These values
* simply indicate whether the operation completed before the timeout. If
* <code>acknowledged</code> is false, the request timed out before the cluster
* state was updated with the newly created index, but it probably will be
* created sometime soon. If <code>shards_acknowledged</code> is false, then the
* request timed out before the requisite number of shards were started (by
* default just the primaries), even if the cluster state was successfully
* updated to reflect the newly created index (that is to say,
* <code>acknowledged</code> is <code>true</code>).
*
* You can change the default of only waiting for the primary shards to start
* through the index setting <code>index.write.wait_for_active_shards</code>.
* Note that changing this setting will also affect the
* <code>wait_for_active_shards</code> value on all subsequent write operations.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateIndexRequest}
* @see Documentation
* on elastic.co
*/
public final CreateIndexResponse create(Function<CreateIndexRequest.Builder, ObjectBuilder<CreateIndexRequest>> fn)
throws IOException, ElasticsearchException {
return create(fn.apply(new CreateIndexRequest.Builder()).build());
}
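// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): creating "my-index" with explicit settings and a text mapping,
// then checking both acknowledgement flags discussed above.
//
// CreateIndexResponse created = esClient.indices().create(c -> c
//     .index("my-index")
//     .settings(s -> s.numberOfShards("1").numberOfReplicas("1"))
//     .mappings(m -> m.properties("title", p -> p.text(t -> t))));
// boolean fullyStarted = created.acknowledged() && created.shardsAcknowledged();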
// ----- Endpoint: indices.create_data_stream
/**
* Create a data stream.
*
* You must have a matching index template with data stream enabled.
*
* @see Documentation
* on elastic.co
*/
public CreateDataStreamResponse createDataStream(CreateDataStreamRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CreateDataStreamRequest, CreateDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateDataStreamRequest, CreateDataStreamResponse, ErrorResponse>) CreateDataStreamRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create a data stream.
*
* You must have a matching index template with data stream enabled.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final CreateDataStreamResponse createDataStream(
Function<CreateDataStreamRequest.Builder, ObjectBuilder<CreateDataStreamRequest>> fn)
throws IOException, ElasticsearchException {
return createDataStream(fn.apply(new CreateDataStreamRequest.Builder()).build());
}
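// Usage sketch (illustrative, assuming an ElasticsearchClient named "esClient"
// and an existing data-stream-enabled index template matching the name):
//
// CreateDataStreamResponse ds = esClient.indices().createDataStream(d -> d
//     .name("logs-myapp-default"));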
// ----- Endpoint: indices.create_from
/**
* Create an index from a source index.
*
* Copy the mappings and settings from the source index to a destination index
* while allowing request settings and mappings to override the source values.
*
* @see Documentation
* on elastic.co
*/
public CreateFromResponse createFrom(CreateFromRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<CreateFromRequest, CreateFromResponse, ErrorResponse> endpoint = (JsonEndpoint<CreateFromRequest, CreateFromResponse, ErrorResponse>) CreateFromRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create an index from a source index.
*
* Copy the mappings and settings from the source index to a destination index
* while allowing request settings and mappings to override the source values.
*
* @param fn
* a function that initializes a builder to create the
* {@link CreateFromRequest}
* @see Documentation
* on elastic.co
*/
public final CreateFromResponse createFrom(Function<CreateFromRequest.Builder, ObjectBuilder<CreateFromRequest>> fn)
throws IOException, ElasticsearchException {
return createFrom(fn.apply(new CreateFromRequest.Builder()).build());
}
// ----- Endpoint: indices.data_streams_stats
/**
* Get data stream stats.
*
* Get statistics for one or more data streams.
*
* @see Documentation
* on elastic.co
*/
public DataStreamsStatsResponse dataStreamsStats(DataStreamsStatsRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DataStreamsStatsRequest, DataStreamsStatsResponse, ErrorResponse> endpoint = (JsonEndpoint<DataStreamsStatsRequest, DataStreamsStatsResponse, ErrorResponse>) DataStreamsStatsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get data stream stats.
*
* Get statistics for one or more data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link DataStreamsStatsRequest}
* @see Documentation
* on elastic.co
*/
public final DataStreamsStatsResponse dataStreamsStats(
Function<DataStreamsStatsRequest.Builder, ObjectBuilder<DataStreamsStatsRequest>> fn)
throws IOException, ElasticsearchException {
return dataStreamsStats(fn.apply(new DataStreamsStatsRequest.Builder()).build());
}
/**
* Get data stream stats.
*
* Get statistics for one or more data streams.
*
* @see Documentation
* on elastic.co
*/
public DataStreamsStatsResponse dataStreamsStats() throws IOException, ElasticsearchException {
return this.transport.performRequest(new DataStreamsStatsRequest.Builder().build(),
DataStreamsStatsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.delete
/**
* Delete indices. Deleting an index deletes its documents, shards, and
* metadata. It does not delete related Kibana components, such as data views,
* visualizations, or dashboards.
*
* You cannot delete the current write index of a data stream. To delete the
* index, you must roll over the data stream so a new write index is created.
* You can then use the delete index API to delete the previous write index.
*
* @see Documentation
* on elastic.co
*/
public DeleteIndexResponse delete(DeleteIndexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteIndexRequest, DeleteIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteIndexRequest, DeleteIndexResponse, ErrorResponse>) DeleteIndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete indices. Deleting an index deletes its documents, shards, and
* metadata. It does not delete related Kibana components, such as data views,
* visualizations, or dashboards.
*
* You cannot delete the current write index of a data stream. To delete the
* index, you must roll over the data stream so a new write index is created.
* You can then use the delete index API to delete the previous write index.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteIndexRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteIndexResponse delete(Function<DeleteIndexRequest.Builder, ObjectBuilder<DeleteIndexRequest>> fn)
throws IOException, ElasticsearchException {
return delete(fn.apply(new DeleteIndexRequest.Builder()).build());
}
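// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): deleting a hypothetical index and ignoring it if it is missing.
//
// DeleteIndexResponse deleted = esClient.indices().delete(d -> d
//     .index("my-old-index")
//     .ignoreUnavailable(true));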
// ----- Endpoint: indices.delete_alias
/**
* Delete an alias. Removes a data stream or index from an alias.
*
* @see Documentation
* on elastic.co
*/
public DeleteAliasResponse deleteAlias(DeleteAliasRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteAliasRequest, DeleteAliasResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteAliasRequest, DeleteAliasResponse, ErrorResponse>) DeleteAliasRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete an alias. Removes a data stream or index from an alias.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteAliasRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteAliasResponse deleteAlias(
Function<DeleteAliasRequest.Builder, ObjectBuilder<DeleteAliasRequest>> fn)
throws IOException, ElasticsearchException {
return deleteAlias(fn.apply(new DeleteAliasRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_data_lifecycle
/**
* Delete data stream lifecycles. Removes the data stream lifecycle from a data
* stream, rendering it not managed by the data stream lifecycle.
*
* @see Documentation
* on elastic.co
*/
public DeleteDataLifecycleResponse deleteDataLifecycle(DeleteDataLifecycleRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteDataLifecycleRequest, DeleteDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteDataLifecycleRequest, DeleteDataLifecycleResponse, ErrorResponse>) DeleteDataLifecycleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete data stream lifecycles. Removes the data stream lifecycle from a data
* stream, rendering it not managed by the data stream lifecycle.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteDataLifecycleResponse deleteDataLifecycle(
Function<DeleteDataLifecycleRequest.Builder, ObjectBuilder<DeleteDataLifecycleRequest>> fn)
throws IOException, ElasticsearchException {
return deleteDataLifecycle(fn.apply(new DeleteDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_data_stream
/**
* Delete data streams. Deletes one or more data streams and their backing
* indices.
*
* @see Documentation
* on elastic.co
*/
public DeleteDataStreamResponse deleteDataStream(DeleteDataStreamRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteDataStreamRequest, DeleteDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteDataStreamRequest, DeleteDataStreamResponse, ErrorResponse>) DeleteDataStreamRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete data streams. Deletes one or more data streams and their backing
* indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteDataStreamResponse deleteDataStream(
Function<DeleteDataStreamRequest.Builder, ObjectBuilder<DeleteDataStreamRequest>> fn)
throws IOException, ElasticsearchException {
return deleteDataStream(fn.apply(new DeleteDataStreamRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_index_template
/**
* Delete an index template. The provided <index-template> may contain
* multiple template names separated by a comma. If multiple template names are
* specified then there is no wildcard support and the provided names should
* match completely with existing templates.
*
* @see Documentation
* on elastic.co
*/
public DeleteIndexTemplateResponse deleteIndexTemplate(DeleteIndexTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, ErrorResponse>) DeleteIndexTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete an index template. The provided <index-template> may contain
* multiple template names separated by a comma. If multiple template names are
* specified then there is no wildcard support and the provided names should
* match completely with existing templates.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteIndexTemplateResponse deleteIndexTemplate(
Function<DeleteIndexTemplateRequest.Builder, ObjectBuilder<DeleteIndexTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return deleteIndexTemplate(fn.apply(new DeleteIndexTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.delete_template
/**
* Delete a legacy index template. IMPORTANT: This documentation is about legacy
* index templates, which are deprecated and will be replaced by the composable
* templates introduced in Elasticsearch 7.8.
*
* @see Documentation
* on elastic.co
*/
public DeleteTemplateResponse deleteTemplate(DeleteTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DeleteTemplateRequest, DeleteTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<DeleteTemplateRequest, DeleteTemplateResponse, ErrorResponse>) DeleteTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Delete a legacy index template. IMPORTANT: This documentation is about legacy
* index templates, which are deprecated and will be replaced by the composable
* templates introduced in Elasticsearch 7.8.
*
* @param fn
* a function that initializes a builder to create the
* {@link DeleteTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final DeleteTemplateResponse deleteTemplate(
Function<DeleteTemplateRequest.Builder, ObjectBuilder<DeleteTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return deleteTemplate(fn.apply(new DeleteTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.disk_usage
/**
* Analyze the index disk usage. Analyze the disk usage of each field of an
* index or data stream. This API might not support indices created in previous
* Elasticsearch versions. The result of a small index can be inaccurate as some
* parts of an index might not be analyzed by the API.
*
* NOTE: The total size of fields of the analyzed shards of the index in the
* response is usually smaller than the index <code>store_size</code> value
* because some small metadata files are ignored and some parts of data files
* might not be scanned by the API. Since stored fields are stored together in a
* compressed format, the sizes of stored fields are also estimates and can be
* inaccurate. The stored size of the <code>_id</code> field is likely
* underestimated while the <code>_source</code> field is overestimated.
*
* @see Documentation
* on elastic.co
*/
public DiskUsageResponse diskUsage(DiskUsageRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DiskUsageRequest, DiskUsageResponse, ErrorResponse> endpoint = (JsonEndpoint<DiskUsageRequest, DiskUsageResponse, ErrorResponse>) DiskUsageRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Analyze the index disk usage. Analyze the disk usage of each field of an
* index or data stream. This API might not support indices created in previous
* Elasticsearch versions. The result of a small index can be inaccurate as some
* parts of an index might not be analyzed by the API.
*
* NOTE: The total size of fields of the analyzed shards of the index in the
* response is usually smaller than the index <code>store_size</code> value
* because some small metadata files are ignored and some parts of data files
* might not be scanned by the API. Since stored fields are stored together in a
* compressed format, the sizes of stored fields are also estimates and can be
* inaccurate. The stored size of the <code>_id</code> field is likely
* underestimated while the <code>_source</code> field is overestimated.
*
* @param fn
* a function that initializes a builder to create the
* {@link DiskUsageRequest}
* @see Documentation
* on elastic.co
*/
public final DiskUsageResponse diskUsage(Function<DiskUsageRequest.Builder, ObjectBuilder<DiskUsageRequest>> fn)
throws IOException, ElasticsearchException {
return diskUsage(fn.apply(new DiskUsageRequest.Builder()).build());
}
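// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): analyzing the per-field disk usage of "my-index". The analysis
// is resource-intensive, so run_expensive_tasks must be enabled.
//
// DiskUsageResponse usage = esClient.indices().diskUsage(d -> d
//     .index("my-index")
//     .runExpensiveTasks(true));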
// ----- Endpoint: indices.downsample
/**
* Downsample an index. Aggregate a time series (TSDS) index and store
* pre-computed statistical summaries (<code>min</code>, <code>max</code>,
* <code>sum</code>, <code>value_count</code> and <code>avg</code>) for each
* metric field grouped by a configured time interval. For example, a TSDS index
* that contains metrics sampled every 10 seconds can be downsampled to an
* hourly index. All documents within an hour interval are summarized and stored
* as a single document in the downsample index.
*
* NOTE: Only indices in a time series data stream are supported. Neither field
* nor document level security can be defined on the source index. The source
* index must be read only (<code>index.blocks.write: true</code>).
*
* @see Documentation
* on elastic.co
*/
public DownsampleResponse downsample(DownsampleRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<DownsampleRequest, DownsampleResponse, ErrorResponse> endpoint = (JsonEndpoint<DownsampleRequest, DownsampleResponse, ErrorResponse>) DownsampleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Downsample an index. Aggregate a time series (TSDS) index and store
* pre-computed statistical summaries (<code>min</code>, <code>max</code>,
* <code>sum</code>, <code>value_count</code> and <code>avg</code>) for each
* metric field grouped by a configured time interval. For example, a TSDS index
* that contains metrics sampled every 10 seconds can be downsampled to an
* hourly index. All documents within an hour interval are summarized and stored
* as a single document in the downsample index.
*
* NOTE: Only indices in a time series data stream are supported. Neither field
* nor document level security can be defined on the source index. The source
* index must be read only (<code>index.blocks.write: true</code>).
*
* @param fn
* a function that initializes a builder to create the
* {@link DownsampleRequest}
* @see Documentation
* on elastic.co
*/
public final DownsampleResponse downsample(Function<DownsampleRequest.Builder, ObjectBuilder<DownsampleRequest>> fn)
throws IOException, ElasticsearchException {
return downsample(fn.apply(new DownsampleRequest.Builder()).build());
}
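// Usage sketch (illustrative, assuming an ElasticsearchClient named "esClient"
// and a read-only TSDS backing index): downsampling to hourly buckets. The
// builder method names follow the request fields and are assumptions.
//
// DownsampleResponse down = esClient.indices().downsample(d -> d
//     .index(".ds-my-tsds-2099.03.07-000001")
//     .targetIndex("my-tsds-downsampled")
//     .config(c -> c.fixedInterval(t -> t.time("1h"))));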
// ----- Endpoint: indices.exists
/**
* Check indices. Check if one or more indices, index aliases, or data streams
* exist.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse exists(ExistsRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
Endpoint<ExistsRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsRequest, BooleanResponse, ErrorResponse>) ExistsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Check indices. Check if one or more indices, index aliases, or data streams
* exist.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsRequest}
* @see Documentation
* on elastic.co
*/
public final BooleanResponse exists(Function<ExistsRequest.Builder, ObjectBuilder<ExistsRequest>> fn)
throws IOException, ElasticsearchException {
return exists(fn.apply(new ExistsRequest.Builder()).build());
}
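// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): the BooleanResponse wraps the HEAD result, so value() yields
// whether "my-index" exists.
//
// boolean indexExists = esClient.indices().exists(e -> e.index("my-index")).value();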
// ----- Endpoint: indices.exists_alias
/**
* Check aliases.
*
* Check if one or more data stream or index aliases exist.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse existsAlias(ExistsAliasRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
Endpoint<ExistsAliasRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsAliasRequest, BooleanResponse, ErrorResponse>) ExistsAliasRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Check aliases.
*
* Check if one or more data stream or index aliases exist.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsAliasRequest}
* @see Documentation
* on elastic.co
*/
public final BooleanResponse existsAlias(Function<ExistsAliasRequest.Builder, ObjectBuilder<ExistsAliasRequest>> fn)
throws IOException, ElasticsearchException {
return existsAlias(fn.apply(new ExistsAliasRequest.Builder()).build());
}
// ----- Endpoint: indices.exists_index_template
/**
* Check index templates.
*
* Check whether index templates exist.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse existsIndexTemplate(ExistsIndexTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
Endpoint<ExistsIndexTemplateRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsIndexTemplateRequest, BooleanResponse, ErrorResponse>) ExistsIndexTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Check index templates.
*
* Check whether index templates exist.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final BooleanResponse existsIndexTemplate(
Function<ExistsIndexTemplateRequest.Builder, ObjectBuilder<ExistsIndexTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return existsIndexTemplate(fn.apply(new ExistsIndexTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.exists_template
/**
* Check existence of index templates. Get information about whether index
* templates exist. Index templates define settings, mappings, and aliases that
* can be applied automatically to new indices.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @see Documentation
* on elastic.co
*/
public BooleanResponse existsTemplate(ExistsTemplateRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
Endpoint<ExistsTemplateRequest, BooleanResponse, ErrorResponse> endpoint = (Endpoint<ExistsTemplateRequest, BooleanResponse, ErrorResponse>) ExistsTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Check existence of index templates. Get information about whether index
* templates exist. Index templates define settings, mappings, and aliases that
* can be applied automatically to new indices.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExistsTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final BooleanResponse existsTemplate(
Function<ExistsTemplateRequest.Builder, ObjectBuilder<ExistsTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return existsTemplate(fn.apply(new ExistsTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.explain_data_lifecycle
/**
* Get the status for a data stream lifecycle. Get information about an index or
* data stream's current data stream lifecycle status, such as time since index
* creation, time since rollover, the lifecycle configuration managing the
* index, or any errors encountered during lifecycle execution.
*
* @see Documentation
* on elastic.co
*/
public ExplainDataLifecycleResponse explainDataLifecycle(ExplainDataLifecycleRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ExplainDataLifecycleRequest, ExplainDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<ExplainDataLifecycleRequest, ExplainDataLifecycleResponse, ErrorResponse>) ExplainDataLifecycleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get the status for a data stream lifecycle. Get information about an index or
* data stream's current data stream lifecycle status, such as time since index
* creation, time since rollover, the lifecycle configuration managing the
* index, or any errors encountered during lifecycle execution.
*
* @param fn
* a function that initializes a builder to create the
* {@link ExplainDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final ExplainDataLifecycleResponse explainDataLifecycle(
Function<ExplainDataLifecycleRequest.Builder, ObjectBuilder<ExplainDataLifecycleRequest>> fn)
throws IOException, ElasticsearchException {
return explainDataLifecycle(fn.apply(new ExplainDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.field_usage_stats
/**
* Get field usage stats. Get field usage information for each shard and field
* of an index. Field usage statistics are automatically captured when queries
* are running on a cluster. A shard-level search request that accesses a given
* field, even if multiple times during that request, is counted as a single
* use.
*
* The response body reports the per-shard usage count of the data structures
* that back the fields in the index. A given request will increment each count
* by a maximum value of 1, even if the request accesses the same field multiple
* times.
*
* @see Documentation
* on elastic.co
*/
public FieldUsageStatsResponse fieldUsageStats(FieldUsageStatsRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<FieldUsageStatsRequest, FieldUsageStatsResponse, ErrorResponse> endpoint = (JsonEndpoint<FieldUsageStatsRequest, FieldUsageStatsResponse, ErrorResponse>) FieldUsageStatsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get field usage stats. Get field usage information for each shard and field
* of an index. Field usage statistics are automatically captured when queries
* are running on a cluster. A shard-level search request that accesses a given
* field, even if multiple times during that request, is counted as a single
* use.
*
* The response body reports the per-shard usage count of the data structures
* that back the fields in the index. A given request will increment each count
* by a maximum value of 1, even if the request accesses the same field multiple
* times.
*
* @param fn
* a function that initializes a builder to create the
* {@link FieldUsageStatsRequest}
* @see Documentation
* on elastic.co
*/
public final FieldUsageStatsResponse fieldUsageStats(
Function<FieldUsageStatsRequest.Builder, ObjectBuilder<FieldUsageStatsRequest>> fn)
throws IOException, ElasticsearchException {
return fieldUsageStats(fn.apply(new FieldUsageStatsRequest.Builder()).build());
}
// ----- Endpoint: indices.flush
/**
* Flush data streams or indices. Flushing a data stream or index is the process
* of making sure that any data that is currently only stored in the transaction
* log is also permanently stored in the Lucene index. When restarting,
* Elasticsearch replays any unflushed operations from the transaction log into
* the Lucene index to bring it back into the state that it was in before the
* restart. Elasticsearch automatically triggers flushes as needed, using
* heuristics that trade off the size of the unflushed transaction log against
* the cost of performing each flush.
*
* After each operation has been flushed it is permanently stored in the Lucene
* index. This may mean that there is no need to maintain an additional copy of
* it in the transaction log. The transaction log is made up of multiple files,
* called generations, and Elasticsearch will delete any generation files when
* they are no longer needed, freeing up disk space.
*
* It is also possible to trigger a flush on one or more indices using the flush
* API, although it is rare for users to need to call this API directly. If you
* call the flush API after indexing some documents then a successful response
* indicates that Elasticsearch has flushed all the documents that were indexed
* before the flush API was called.
*
* @see Documentation
* on elastic.co
*/
public FlushResponse flush(FlushRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<FlushRequest, FlushResponse, ErrorResponse> endpoint = (JsonEndpoint<FlushRequest, FlushResponse, ErrorResponse>) FlushRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Flush data streams or indices. Flushing a data stream or index is the process
* of making sure that any data that is currently only stored in the transaction
* log is also permanently stored in the Lucene index. When restarting,
* Elasticsearch replays any unflushed operations from the transaction log into
* the Lucene index to bring it back into the state that it was in before the
* restart. Elasticsearch automatically triggers flushes as needed, using
* heuristics that trade off the size of the unflushed transaction log against
* the cost of performing each flush.
*
* After each operation has been flushed it is permanently stored in the Lucene
* index. This may mean that there is no need to maintain an additional copy of
* it in the transaction log. The transaction log is made up of multiple files,
* called generations, and Elasticsearch will delete any generation files when
* they are no longer needed, freeing up disk space.
*
* It is also possible to trigger a flush on one or more indices using the flush
* API, although it is rare for users to need to call this API directly. If you
* call the flush API after indexing some documents then a successful response
* indicates that Elasticsearch has flushed all the documents that were indexed
* before the flush API was called.
*
* @param fn
* a function that initializes a builder to create the
* {@link FlushRequest}
* @see Documentation
* on elastic.co
*/
public final FlushResponse flush(Function<FlushRequest.Builder, ObjectBuilder<FlushRequest>> fn)
throws IOException, ElasticsearchException {
return flush(fn.apply(new FlushRequest.Builder()).build());
}
/**
* Flush data streams or indices. Flushing a data stream or index is the process
* of making sure that any data that is currently only stored in the transaction
* log is also permanently stored in the Lucene index. When restarting,
* Elasticsearch replays any unflushed operations from the transaction log into
* the Lucene index to bring it back into the state that it was in before the
* restart. Elasticsearch automatically triggers flushes as needed, using
* heuristics that trade off the size of the unflushed transaction log against
* the cost of performing each flush.
*
* After each operation has been flushed it is permanently stored in the Lucene
* index. This may mean that there is no need to maintain an additional copy of
* it in the transaction log. The transaction log is made up of multiple files,
* called generations, and Elasticsearch will delete any generation files when
* they are no longer needed, freeing up disk space.
*
* It is also possible to trigger a flush on one or more indices using the flush
* API, although it is rare for users to need to call this API directly. If you
* call the flush API after indexing some documents then a successful response
* indicates that Elasticsearch has flushed all the documents that were indexed
* before the flush API was called.
*
* @see Documentation
* on elastic.co
*/
public FlushResponse flush() throws IOException, ElasticsearchException {
return this.transport.performRequest(new FlushRequest.Builder().build(), FlushRequest._ENDPOINT,
this.transportOptions);
}
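// Usage sketch (illustrative, assuming an ElasticsearchClient named
// "esClient"): explicitly flushing one index after indexing a batch of
// documents.
//
// FlushResponse flushed = esClient.indices().flush(f -> f.index("my-index"));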
// ----- Endpoint: indices.forcemerge
/**
* Force a merge. Perform the force merge operation on the shards of one or more
* indices. For data streams, the API forces a merge on the shards of the
* stream's backing indices.
*
* Merging reduces the number of segments in each shard by merging some of them
* together and also frees up the space used by deleted documents. Merging
* normally happens automatically, but sometimes it is useful to trigger a merge
* manually.
*
* WARNING: We recommend force merging only a read-only index (meaning the index
* is no longer receiving writes). When documents are updated or deleted, the
* old version is not immediately removed but instead soft-deleted and marked
* with a "tombstone". These soft-deleted documents are automatically
* cleaned up during regular segment merges. But force merge can cause very
* large (greater than 5 GB) segments to be produced, which are not eligible for
* regular merges. So the number of soft-deleted documents can then grow
* rapidly, resulting in higher disk usage and worse search performance. If you
* regularly force merge an index receiving writes, this can also make snapshots
* more expensive, since the new documents can't be backed up incrementally.
*
* Blocks during a force merge
*
* Calls to this API block until the merge is complete (unless the request
* contains <code>wait_for_completion=false</code>). If the client connection is lost
* before completion then the force merge process will continue in the
* background. Any new requests to force merge the same indices will also block
* until the ongoing force merge is complete.
*
* Running force merge asynchronously
*
* If the request contains <code>wait_for_completion=false</code>, Elasticsearch
* performs some preflight checks, launches the request, and returns a task you
* can use to get the status of the task. However, you cannot cancel this task
* as the force merge task is not cancelable. Elasticsearch creates a record of
* this task as a document at <code>_tasks/<task_id></code>. When you are
* done with a task, you should delete the task document so Elasticsearch can
* reclaim the space.
*
* Force merging multiple indices
*
* You can force merge multiple indices with a single request by targeting:
*
* - One or more data streams that contain multiple backing indices
* - Multiple indices
* - One or more aliases
* - All data streams and indices in a cluster
*
*
* Each targeted shard is force-merged separately using the
* <code>force_merge</code> threadpool. By default each node only has a single
* <code>force_merge</code> thread, which means that the shards on that node are
* force-merged one at a time. If you expand the <code>force_merge</code>
* threadpool on a node then it will force merge its shards in parallel.
*
* Force merge makes the storage for the shard being merged temporarily
* increase, as it may require free space up to triple its size in case the
* <code>max_num_segments</code> parameter is set to <code>1</code>, to rewrite
* all segments into a new one.
*
* Data streams and time-based indices
*
* Force-merging is useful for managing a data stream's older backing indices
* and other time-based indices, particularly after a rollover. In these cases,
* each index only receives indexing traffic for a certain period of time. Once
* an index receives no more writes, its shards can be force-merged to a single
* segment. This can be a good idea because single-segment shards can sometimes
* use simpler and more efficient data structures to perform searches. For
* example:
*
* <pre>
* POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
* </pre>
*
* @see Documentation
* on elastic.co
*/
public ForcemergeResponse forcemerge(ForcemergeRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ForcemergeRequest, ForcemergeResponse, ErrorResponse> endpoint = (JsonEndpoint<ForcemergeRequest, ForcemergeResponse, ErrorResponse>) ForcemergeRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Force a merge. Perform the force merge operation on the shards of one or more
* indices. For data streams, the API forces a merge on the shards of the
* stream's backing indices.
*
* Merging reduces the number of segments in each shard by merging some of them
* together and also frees up the space used by deleted documents. Merging
* normally happens automatically, but sometimes it is useful to trigger a merge
* manually.
*
* WARNING: We recommend force merging only a read-only index (meaning the index
* is no longer receiving writes). When documents are updated or deleted, the
* old version is not immediately removed but instead soft-deleted and marked
* with a "tombstone". These soft-deleted documents are automatically
* cleaned up during regular segment merges. But force merge can cause very
* large (greater than 5 GB) segments to be produced, which are not eligible for
* regular merges. So the number of soft-deleted documents can then grow
* rapidly, resulting in higher disk usage and worse search performance. If you
* regularly force merge an index receiving writes, this can also make snapshots
* more expensive, since the new documents can't be backed up incrementally.
*
* Blocks during a force merge
*
* Calls to this API block until the merge is complete (unless the request
* contains <code>wait_for_completion=false</code>). If the client connection is lost
* before completion then the force merge process will continue in the
* background. Any new requests to force merge the same indices will also block
* until the ongoing force merge is complete.
*
* Running force merge asynchronously
*
* If the request contains <code>wait_for_completion=false</code>, Elasticsearch
* performs some preflight checks, launches the request, and returns a task you
* can use to get the status of the task. However, you cannot cancel this task
* as the force merge task is not cancelable. Elasticsearch creates a record of
* this task as a document at <code>_tasks/<task_id></code>. When you are
* done with a task, you should delete the task document so Elasticsearch can
* reclaim the space.
*
* Force merging multiple indices
*
* You can force merge multiple indices with a single request by targeting:
*
* - One or more data streams that contain multiple backing indices
* - Multiple indices
* - One or more aliases
* - All data streams and indices in a cluster
*
*
* Each targeted shard is force-merged separately using the
* <code>force_merge</code> threadpool. By default each node only has a single
* <code>force_merge</code> thread, which means that the shards on that node are
* force-merged one at a time. If you expand the <code>force_merge</code>
* threadpool on a node then it will force merge its shards in parallel.
*
* Force merge makes the storage for the shard being merged temporarily
* increase, as it may require free space up to triple its size in case the
* <code>max_num_segments</code> parameter is set to <code>1</code>, to rewrite
* all segments into a new one.
*
* Data streams and time-based indices
*
* Force-merging is useful for managing a data stream's older backing indices
* and other time-based indices, particularly after a rollover. In these cases,
* each index only receives indexing traffic for a certain period of time. Once
* an index receives no more writes, its shards can be force-merged to a single
* segment. This can be a good idea because single-segment shards can sometimes
* use simpler and more efficient data structures to perform searches. For
* example:
*
* <pre>
* POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
* </pre>
*
* @param fn
* a function that initializes a builder to create the
* {@link ForcemergeRequest}
* @see Documentation
* on elastic.co
*/
public final ForcemergeResponse forcemerge(Function<ForcemergeRequest.Builder, ObjectBuilder<ForcemergeRequest>> fn)
throws IOException, ElasticsearchException {
return forcemerge(fn.apply(new ForcemergeRequest.Builder()).build());
}
/**
* Force a merge. Perform the force merge operation on the shards of one or more
* indices. For data streams, the API forces a merge on the shards of the
* stream's backing indices.
*
* Merging reduces the number of segments in each shard by merging some of them
* together and also frees up the space used by deleted documents. Merging
* normally happens automatically, but sometimes it is useful to trigger a merge
* manually.
*
* WARNING: We recommend force merging only a read-only index (meaning the index
* is no longer receiving writes). When documents are updated or deleted, the
* old version is not immediately removed but instead soft-deleted and marked
* with a "tombstone". These soft-deleted documents are automatically
* cleaned up during regular segment merges. But force merge can cause very
* large (greater than 5 GB) segments to be produced, which are not eligible for
* regular merges. So the number of soft-deleted documents can then grow
* rapidly, resulting in higher disk usage and worse search performance. If you
* regularly force merge an index receiving writes, this can also make snapshots
* more expensive, since the new documents can't be backed up incrementally.
*
* Blocks during a force merge
*
* Calls to this API block until the merge is complete (unless the request
* contains <code>wait_for_completion=false</code>). If the client connection is
* lost before completion then the force merge process will continue in the
* background. Any new requests to force merge the same indices will also block
* until the ongoing force merge is complete.
*
* Running force merge asynchronously
*
* If the request contains <code>wait_for_completion=false</code>, Elasticsearch
* performs some preflight checks, launches the request, and returns a task you
* can use to get the status of the task. However, you cannot cancel this task
* because the force merge task is not cancelable. Elasticsearch creates a
* record of this task as a document at <code>_tasks/&lt;task_id&gt;</code>. When you are
* done with a task, you should delete the task document so Elasticsearch can
* reclaim the space.
*
* Force merging multiple indices
*
* You can force merge multiple indices with a single request by targeting:
*
* - One or more data streams that contain multiple backing indices
* - Multiple indices
* - One or more aliases
* - All data streams and indices in a cluster
*
*
* Each targeted shard is force-merged separately using the
* <code>force_merge</code> threadpool. By default each node only has a single
* <code>force_merge</code> thread, which means that the shards on that node are
* force-merged one at a time. If you expand the <code>force_merge</code>
* threadpool on a node then it will force merge its shards in parallel.
*
* Force merge makes the storage for the shard being merged temporarily
* increase, as it may require free space up to triple its size in case the
* <code>max_num_segments</code> parameter is set to <code>1</code>, to rewrite
* all segments into a new one.
*
* Data streams and time-based indices
*
* Force-merging is useful for managing a data stream's older backing indices
* and other time-based indices, particularly after a rollover. In these cases,
* each index only receives indexing traffic for a certain period of time. Once
* an index receives no more writes, its shards can be force-merged to a single
* segment. This can be a good idea because single-segment shards can sometimes
* use simpler and more efficient data structures to perform searches. For
* example:
*
* <pre>
* POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
* </pre>
*
* @see Documentation
* on elastic.co
*/
public ForcemergeResponse forcemerge() throws IOException, ElasticsearchException {
return this.transport.performRequest(new ForcemergeRequest.Builder().build(), ForcemergeRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.get
/**
* Get index information. Get information about one or more indices. For data
* streams, the API returns information about the stream’s backing indices.
*
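* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative index name:
*
* <pre>{@code
* GetIndexResponse resp = indices.get(g -> g.index("my-index"));
* }</pre>
*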
* @see Documentation
* on elastic.co
*/
public GetIndexResponse get(GetIndexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetIndexRequest, GetIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<GetIndexRequest, GetIndexResponse, ErrorResponse>) GetIndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get index information. Get information about one or more indices. For data
* streams, the API returns information about the stream’s backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetIndexRequest}
* @see Documentation
* on elastic.co
*/
public final GetIndexResponse get(Function<GetIndexRequest.Builder, ObjectBuilder<GetIndexRequest>> fn)
throws IOException, ElasticsearchException {
return get(fn.apply(new GetIndexRequest.Builder()).build());
}
// ----- Endpoint: indices.get_alias
/**
* Get aliases. Retrieves information for one or more data stream or index
* aliases.
*
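* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative alias name:
*
* <pre>{@code
* GetAliasResponse resp = indices.getAlias(a -> a.name("my-alias"));
* }</pre>
*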
* @see Documentation
* on elastic.co
*/
public GetAliasResponse getAlias(GetAliasRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetAliasRequest, GetAliasResponse, ErrorResponse> endpoint = (JsonEndpoint<GetAliasRequest, GetAliasResponse, ErrorResponse>) GetAliasRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get aliases. Retrieves information for one or more data stream or index
* aliases.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetAliasRequest}
* @see Documentation
* on elastic.co
*/
public final GetAliasResponse getAlias(Function<GetAliasRequest.Builder, ObjectBuilder<GetAliasRequest>> fn)
throws IOException, ElasticsearchException {
return getAlias(fn.apply(new GetAliasRequest.Builder()).build());
}
/**
* Get aliases. Retrieves information for one or more data stream or index
* aliases.
*
* @see Documentation
* on elastic.co
*/
public GetAliasResponse getAlias() throws IOException, ElasticsearchException {
return this.transport.performRequest(new GetAliasRequest.Builder().build(), GetAliasRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.get_data_lifecycle
/**
* Get data stream lifecycles.
*
* Get the data stream lifecycle configuration of one or more data streams.
*
* @see Documentation
* on elastic.co
*/
public GetDataLifecycleResponse getDataLifecycle(GetDataLifecycleRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetDataLifecycleRequest, GetDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<GetDataLifecycleRequest, GetDataLifecycleResponse, ErrorResponse>) GetDataLifecycleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get data stream lifecycles.
*
* Get the data stream lifecycle configuration of one or more data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final GetDataLifecycleResponse getDataLifecycle(
Function<GetDataLifecycleRequest.Builder, ObjectBuilder<GetDataLifecycleRequest>> fn)
throws IOException, ElasticsearchException {
return getDataLifecycle(fn.apply(new GetDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.get_data_lifecycle_stats
/**
* Get data stream lifecycle stats. Get statistics about the data streams that
* are managed by a data stream lifecycle.
*
* @see Documentation
* on elastic.co
*/
public GetDataLifecycleStatsResponse getDataLifecycleStats() throws IOException, ElasticsearchException {
return this.transport.performRequest(GetDataLifecycleStatsRequest._INSTANCE,
GetDataLifecycleStatsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_data_stream
/**
* Get data streams.
*
* Get information about one or more data streams.
*
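* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative data stream name:
*
* <pre>{@code
* GetDataStreamResponse resp = indices.getDataStream(d -> d.name("my-data-stream"));
* }</pre>
*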
* @see Documentation
* on elastic.co
*/
public GetDataStreamResponse getDataStream(GetDataStreamRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetDataStreamRequest, GetDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<GetDataStreamRequest, GetDataStreamResponse, ErrorResponse>) GetDataStreamRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get data streams.
*
* Get information about one or more data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final GetDataStreamResponse getDataStream(
Function<GetDataStreamRequest.Builder, ObjectBuilder<GetDataStreamRequest>> fn)
throws IOException, ElasticsearchException {
return getDataStream(fn.apply(new GetDataStreamRequest.Builder()).build());
}
/**
* Get data streams.
*
* Get information about one or more data streams.
*
* @see Documentation
* on elastic.co
*/
public GetDataStreamResponse getDataStream() throws IOException, ElasticsearchException {
return this.transport.performRequest(new GetDataStreamRequest.Builder().build(), GetDataStreamRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.get_field_mapping
/**
* Get mapping definitions. Retrieves mapping definitions for one or more
* fields. For data streams, the API retrieves field mappings for the stream’s
* backing indices.
*
* This API is useful if you don't need a complete mapping or if an index
* mapping contains a large number of fields.
*
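* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code>; the index and field names are illustrative:
*
* <pre>{@code
* GetFieldMappingResponse resp = indices.getFieldMapping(m -> m
*     .index("my-index")
*     .fields("user.id"));
* }</pre>
*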
* @see Documentation
* on elastic.co
*/
public GetFieldMappingResponse getFieldMapping(GetFieldMappingRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetFieldMappingRequest, GetFieldMappingResponse, ErrorResponse> endpoint = (JsonEndpoint<GetFieldMappingRequest, GetFieldMappingResponse, ErrorResponse>) GetFieldMappingRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get mapping definitions. Retrieves mapping definitions for one or more
* fields. For data streams, the API retrieves field mappings for the stream’s
* backing indices.
*
* This API is useful if you don't need a complete mapping or if an index
* mapping contains a large number of fields.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetFieldMappingRequest}
* @see Documentation
* on elastic.co
*/
public final GetFieldMappingResponse getFieldMapping(
Function<GetFieldMappingRequest.Builder, ObjectBuilder<GetFieldMappingRequest>> fn)
throws IOException, ElasticsearchException {
return getFieldMapping(fn.apply(new GetFieldMappingRequest.Builder()).build());
}
// ----- Endpoint: indices.get_index_template
/**
* Get index templates. Get information about one or more index templates.
*
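* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative template name:
*
* <pre>{@code
* GetIndexTemplateResponse resp = indices.getIndexTemplate(t -> t.name("my-template"));
* }</pre>
*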
* @see Documentation
* on elastic.co
*/
public GetIndexTemplateResponse getIndexTemplate(GetIndexTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetIndexTemplateRequest, GetIndexTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<GetIndexTemplateRequest, GetIndexTemplateResponse, ErrorResponse>) GetIndexTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get index templates. Get information about one or more index templates.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final GetIndexTemplateResponse getIndexTemplate(
Function<GetIndexTemplateRequest.Builder, ObjectBuilder<GetIndexTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return getIndexTemplate(fn.apply(new GetIndexTemplateRequest.Builder()).build());
}
/**
* Get index templates. Get information about one or more index templates.
*
* @see Documentation
* on elastic.co
*/
public GetIndexTemplateResponse getIndexTemplate() throws IOException, ElasticsearchException {
return this.transport.performRequest(new GetIndexTemplateRequest.Builder().build(),
GetIndexTemplateRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_mapping
/**
* Get mapping definitions. For data streams, the API retrieves mappings for the
* stream’s backing indices.
*
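* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative index name:
*
* <pre>{@code
* GetMappingResponse resp = indices.getMapping(m -> m.index("my-index"));
* }</pre>
*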
* @see Documentation
* on elastic.co
*/
public GetMappingResponse getMapping(GetMappingRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetMappingRequest, GetMappingResponse, ErrorResponse> endpoint = (JsonEndpoint<GetMappingRequest, GetMappingResponse, ErrorResponse>) GetMappingRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get mapping definitions. For data streams, the API retrieves mappings for the
* stream’s backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetMappingRequest}
* @see Documentation
* on elastic.co
*/
public final GetMappingResponse getMapping(Function<GetMappingRequest.Builder, ObjectBuilder<GetMappingRequest>> fn)
throws IOException, ElasticsearchException {
return getMapping(fn.apply(new GetMappingRequest.Builder()).build());
}
/**
* Get mapping definitions. For data streams, the API retrieves mappings for the
* stream’s backing indices.
*
* @see Documentation
* on elastic.co
*/
public GetMappingResponse getMapping() throws IOException, ElasticsearchException {
return this.transport.performRequest(new GetMappingRequest.Builder().build(), GetMappingRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.get_migrate_reindex_status
/**
* Get the migration reindexing status.
*
* Get the status of a migration reindex attempt for a data stream or index.
*
* @see Documentation
* on elastic.co
*/
public GetMigrateReindexStatusResponse getMigrateReindexStatus(GetMigrateReindexStatusRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetMigrateReindexStatusRequest, GetMigrateReindexStatusResponse, ErrorResponse> endpoint = (JsonEndpoint<GetMigrateReindexStatusRequest, GetMigrateReindexStatusResponse, ErrorResponse>) GetMigrateReindexStatusRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get the migration reindexing status.
*
* Get the status of a migration reindex attempt for a data stream or index.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetMigrateReindexStatusRequest}
* @see Documentation
* on elastic.co
*/
public final GetMigrateReindexStatusResponse getMigrateReindexStatus(
Function<GetMigrateReindexStatusRequest.Builder, ObjectBuilder<GetMigrateReindexStatusRequest>> fn)
throws IOException, ElasticsearchException {
return getMigrateReindexStatus(fn.apply(new GetMigrateReindexStatusRequest.Builder()).build());
}
// ----- Endpoint: indices.get_settings
/**
* Get index settings. Get setting information for one or more indices. For data
* streams, it returns setting information for the stream's backing indices.
*
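* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative index name:
*
* <pre>{@code
* GetIndicesSettingsResponse resp = indices.getSettings(s -> s.index("my-index"));
* }</pre>
*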
* @see Documentation
* on elastic.co
*/
public GetIndicesSettingsResponse getSettings(GetIndicesSettingsRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetIndicesSettingsRequest, GetIndicesSettingsResponse, ErrorResponse> endpoint = (JsonEndpoint<GetIndicesSettingsRequest, GetIndicesSettingsResponse, ErrorResponse>) GetIndicesSettingsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get index settings. Get setting information for one or more indices. For data
* streams, it returns setting information for the stream's backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetIndicesSettingsRequest}
* @see Documentation
* on elastic.co
*/
public final GetIndicesSettingsResponse getSettings(
Function<GetIndicesSettingsRequest.Builder, ObjectBuilder<GetIndicesSettingsRequest>> fn)
throws IOException, ElasticsearchException {
return getSettings(fn.apply(new GetIndicesSettingsRequest.Builder()).build());
}
/**
* Get index settings. Get setting information for one or more indices. For data
* streams, it returns setting information for the stream's backing indices.
*
* @see Documentation
* on elastic.co
*/
public GetIndicesSettingsResponse getSettings() throws IOException, ElasticsearchException {
return this.transport.performRequest(new GetIndicesSettingsRequest.Builder().build(),
GetIndicesSettingsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.get_template
/**
* Get legacy index templates. Get information about one or more index
* templates.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @see Documentation
* on elastic.co
*/
public GetTemplateResponse getTemplate(GetTemplateRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<GetTemplateRequest, GetTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<GetTemplateRequest, GetTemplateResponse, ErrorResponse>) GetTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get legacy index templates. Get information about one or more index
* templates.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @param fn
* a function that initializes a builder to create the
* {@link GetTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final GetTemplateResponse getTemplate(
Function<GetTemplateRequest.Builder, ObjectBuilder<GetTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return getTemplate(fn.apply(new GetTemplateRequest.Builder()).build());
}
/**
* Get legacy index templates. Get information about one or more index
* templates.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* @see Documentation
* on elastic.co
*/
public GetTemplateResponse getTemplate() throws IOException, ElasticsearchException {
return this.transport.performRequest(new GetTemplateRequest.Builder().build(), GetTemplateRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.migrate_reindex
/**
* Reindex legacy backing indices.
*
* Reindex all legacy backing indices for a data stream. This operation occurs
* in a persistent task. The persistent task ID is returned immediately and the
* reindexing work is completed in that task.
*
* @see Documentation
* on elastic.co
*/
public MigrateReindexResponse migrateReindex(MigrateReindexRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MigrateReindexRequest, MigrateReindexResponse, ErrorResponse> endpoint = (JsonEndpoint<MigrateReindexRequest, MigrateReindexResponse, ErrorResponse>) MigrateReindexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Reindex legacy backing indices.
*
* Reindex all legacy backing indices for a data stream. This operation occurs
* in a persistent task. The persistent task ID is returned immediately and the
* reindexing work is completed in that task.
*
* @param fn
* a function that initializes a builder to create the
* {@link MigrateReindexRequest}
* @see Documentation
* on elastic.co
*/
public final MigrateReindexResponse migrateReindex(
Function<MigrateReindexRequest.Builder, ObjectBuilder<MigrateReindexRequest>> fn)
throws IOException, ElasticsearchException {
return migrateReindex(fn.apply(new MigrateReindexRequest.Builder()).build());
}
/**
* Reindex legacy backing indices.
*
* Reindex all legacy backing indices for a data stream. This operation occurs
* in a persistent task. The persistent task ID is returned immediately and the
* reindexing work is completed in that task.
*
* @see Documentation
* on elastic.co
*/
public MigrateReindexResponse migrateReindex() throws IOException, ElasticsearchException {
return this.transport.performRequest(new MigrateReindexRequest.Builder().build(),
MigrateReindexRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.migrate_to_data_stream
/**
* Convert an index alias to a data stream. Converts an index alias to a data
* stream. You must have a matching index template that is data stream enabled.
* The alias must meet the following criteria: the alias must have a write
* index; all indices for the alias must have a <code>@timestamp</code> field
* mapping of a <code>date</code> or <code>date_nanos</code> field type; the
* alias must not have any filters; the alias must not use custom routing. If
* successful, the request removes the alias and creates a data stream with the
* same name. The indices for the alias become hidden backing indices for the
* stream. The write index for the alias becomes the write index for the stream.
*
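* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative alias name that meets the criteria
* above:
*
* <pre>{@code
* MigrateToDataStreamResponse resp = indices.migrateToDataStream(m -> m.name("my-logs"));
* }</pre>
*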
* @see Documentation
* on elastic.co
*/
public MigrateToDataStreamResponse migrateToDataStream(MigrateToDataStreamRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<MigrateToDataStreamRequest, MigrateToDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<MigrateToDataStreamRequest, MigrateToDataStreamResponse, ErrorResponse>) MigrateToDataStreamRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Convert an index alias to a data stream. Converts an index alias to a data
* stream. You must have a matching index template that is data stream enabled.
* The alias must meet the following criteria: the alias must have a write
* index; all indices for the alias must have a <code>@timestamp</code> field
* mapping of a <code>date</code> or <code>date_nanos</code> field type; the
* alias must not have any filters; the alias must not use custom routing. If
* successful, the request removes the alias and creates a data stream with the
* same name. The indices for the alias become hidden backing indices for the
* stream. The write index for the alias becomes the write index for the stream.
*
* @param fn
* a function that initializes a builder to create the
* {@link MigrateToDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final MigrateToDataStreamResponse migrateToDataStream(
Function<MigrateToDataStreamRequest.Builder, ObjectBuilder<MigrateToDataStreamRequest>> fn)
throws IOException, ElasticsearchException {
return migrateToDataStream(fn.apply(new MigrateToDataStreamRequest.Builder()).build());
}
// ----- Endpoint: indices.modify_data_stream
/**
* Update data streams. Performs one or more data stream modification actions in
* a single atomic operation.
*
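* A sketch of a single atomic action, assuming an
* {@link ElasticsearchIndicesClient} named <code>indices</code>; the data
* stream and backing index names are illustrative:
*
* <pre>{@code
* ModifyDataStreamResponse resp = indices.modifyDataStream(m -> m
*     .actions(a -> a.removeBackingIndex(r -> r
*         .dataStream("my-data-stream")
*         .index(".ds-my-data-stream-2099.03.07-000001"))));
* }</pre>
*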
* @see Documentation
* on elastic.co
*/
public ModifyDataStreamResponse modifyDataStream(ModifyDataStreamRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ModifyDataStreamRequest, ModifyDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<ModifyDataStreamRequest, ModifyDataStreamResponse, ErrorResponse>) ModifyDataStreamRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Update data streams. Performs one or more data stream modification actions in
* a single atomic operation.
*
* @param fn
* a function that initializes a builder to create the
* {@link ModifyDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final ModifyDataStreamResponse modifyDataStream(
Function<ModifyDataStreamRequest.Builder, ObjectBuilder<ModifyDataStreamRequest>> fn)
throws IOException, ElasticsearchException {
return modifyDataStream(fn.apply(new ModifyDataStreamRequest.Builder()).build());
}
// ----- Endpoint: indices.open
/**
* Open a closed index. For data streams, the API opens any closed backing
* indices.
*
* A closed index is blocked for read/write operations and does not allow all
* operations that opened indices allow. It is not possible to index documents
* or to search for documents in a closed index. This allows closed indices to
* not have to maintain internal data structures for indexing or searching
* documents, resulting in a smaller overhead on the cluster.
*
* When opening or closing an index, the master is responsible for restarting
* the index shards to reflect the new state of the index. The shards will then
* go through the normal recovery process. The data of opened or closed indices
* is automatically replicated by the cluster to ensure that enough shard copies
* are safely kept around at all times.
*
* You can open and close multiple indices. An error is thrown if the request
* explicitly refers to a missing index. This behavior can be turned off by
* using the <code>ignore_unavailable=true</code> parameter.
*
* By default, you must explicitly name the indices you are opening or closing.
* To open or close indices with <code>_all</code>, <code>*</code>, or other
* wildcard expressions, change the
* <code>action.destructive_requires_name</code> setting to <code>false</code>.
* This setting can also be changed with the cluster update settings API.
*
* Closed indices consume a significant amount of disk space, which can cause
* problems in managed environments. Closing indices can be turned off with the
* cluster settings API by setting <code>cluster.indices.close.enable</code> to
* <code>false</code>.
*
* Because opening or closing an index allocates its shards, the
* <code>wait_for_active_shards</code> setting on index creation applies to the
* <code>_open</code> and <code>_close</code> index actions as well.
*
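* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative index name:
*
* <pre>{@code
* OpenResponse resp = indices.open(o -> o
*     .index("my-index")
*     .ignoreUnavailable(true));
* }</pre>
*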
* @see Documentation
* on elastic.co
*/
public OpenResponse open(OpenRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<OpenRequest, OpenResponse, ErrorResponse> endpoint = (JsonEndpoint<OpenRequest, OpenResponse, ErrorResponse>) OpenRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Open a closed index. For data streams, the API opens any closed backing
* indices.
*
* A closed index is blocked for read/write operations and does not allow all
* operations that opened indices allow. It is not possible to index documents
* or to search for documents in a closed index. This allows closed indices to
* not have to maintain internal data structures for indexing or searching
* documents, resulting in a smaller overhead on the cluster.
*
* When opening or closing an index, the master is responsible for restarting
* the index shards to reflect the new state of the index. The shards will then
* go through the normal recovery process. The data of opened or closed indices
* is automatically replicated by the cluster to ensure that enough shard copies
* are safely kept around at all times.
*
* You can open and close multiple indices. An error is thrown if the request
* explicitly refers to a missing index. This behavior can be turned off by
* using the <code>ignore_unavailable=true</code> parameter.
*
* By default, you must explicitly name the indices you are opening or closing.
* To open or close indices with <code>_all</code>, <code>*</code>, or other
* wildcard expressions, change the
* <code>action.destructive_requires_name</code> setting to <code>false</code>.
* This setting can also be changed with the cluster update settings API.
*
* Closed indices consume a significant amount of disk space, which can cause
* problems in managed environments. Closing indices can be turned off with the
* cluster settings API by setting <code>cluster.indices.close.enable</code> to
* <code>false</code>.
*
* Because opening or closing an index allocates its shards, the
* <code>wait_for_active_shards</code> setting on index creation applies to the
* <code>_open</code> and <code>_close</code> index actions as well.
*
* @param fn
* a function that initializes a builder to create the
* {@link OpenRequest}
* @see Documentation
* on elastic.co
*/
public final OpenResponse open(Function<OpenRequest.Builder, ObjectBuilder<OpenRequest>> fn)
throws IOException, ElasticsearchException {
return open(fn.apply(new OpenRequest.Builder()).build());
}
// ----- Endpoint: indices.promote_data_stream
/**
* Promote a data stream. Promote a data stream from a replicated data stream
* managed by cross-cluster replication (CCR) to a regular data stream.
*
* With CCR auto following, a data stream from a remote cluster can be
* replicated to the local cluster. These data streams can't be rolled over in
* the local cluster. These replicated data streams roll over only if the
* upstream data stream rolls over. In the event that the remote cluster is no
* longer available, the data stream in the local cluster can be promoted to a
* regular data stream, which allows these data streams to be rolled over in the
* local cluster.
*
* NOTE: When promoting a data stream, ensure the local cluster has a data
* stream enabled index template that matches the data stream. If this is
* missing, the data stream will not be able to roll over until a matching index
* template is created. This will affect the lifecycle management of the data
* stream and interfere with the data stream size and retention.
*
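* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative data stream name:
*
* <pre>{@code
* PromoteDataStreamResponse resp = indices.promoteDataStream(p -> p.name("my-data-stream"));
* }</pre>
*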
* @see Documentation
* on elastic.co
*/
public PromoteDataStreamResponse promoteDataStream(PromoteDataStreamRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PromoteDataStreamRequest, PromoteDataStreamResponse, ErrorResponse> endpoint = (JsonEndpoint<PromoteDataStreamRequest, PromoteDataStreamResponse, ErrorResponse>) PromoteDataStreamRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Promote a data stream. Promote a data stream from a replicated data stream
* managed by cross-cluster replication (CCR) to a regular data stream.
*
* With CCR auto following, a data stream from a remote cluster can be
* replicated to the local cluster. These data streams can't be rolled over in
* the local cluster. These replicated data streams roll over only if the
* upstream data stream rolls over. In the event that the remote cluster is no
* longer available, the data stream in the local cluster can be promoted to a
* regular data stream, which allows these data streams to be rolled over in the
* local cluster.
*
* NOTE: When promoting a data stream, ensure the local cluster has a data
* stream enabled index template that matches the data stream. If this is
* missing, the data stream will not be able to roll over until a matching index
* template is created. This will affect the lifecycle management of the data
* stream and interfere with the data stream size and retention.
*
* @param fn
* a function that initializes a builder to create the
* {@link PromoteDataStreamRequest}
* @see Documentation
* on elastic.co
*/
public final PromoteDataStreamResponse promoteDataStream(
Function<PromoteDataStreamRequest.Builder, ObjectBuilder<PromoteDataStreamRequest>> fn)
throws IOException, ElasticsearchException {
return promoteDataStream(fn.apply(new PromoteDataStreamRequest.Builder()).build());
}
// ----- Endpoint: indices.put_alias
/**
* Create or update an alias. Adds a data stream or index to an alias.
*
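* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code>; the index and alias names are illustrative:
*
* <pre>{@code
* PutAliasResponse resp = indices.putAlias(a -> a
*     .index("my-index")
*     .name("my-alias"));
* }</pre>
*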
* @see Documentation
* on elastic.co
*/
public PutAliasResponse putAlias(PutAliasRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutAliasRequest, PutAliasResponse, ErrorResponse> endpoint = (JsonEndpoint<PutAliasRequest, PutAliasResponse, ErrorResponse>) PutAliasRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create or update an alias. Adds a data stream or index to an alias.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutAliasRequest}
* @see Documentation
* on elastic.co
*/
public final PutAliasResponse putAlias(Function<PutAliasRequest.Builder, ObjectBuilder<PutAliasRequest>> fn)
throws IOException, ElasticsearchException {
return putAlias(fn.apply(new PutAliasRequest.Builder()).build());
}
// ----- Endpoint: indices.put_data_lifecycle
/**
* Update data stream lifecycles. Update the data stream lifecycle of the
* specified data streams.
*
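* A sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code>; the data stream name and the
* <code>dataRetention</code> builder property are illustrative and may differ
* across client versions:
*
* <pre>{@code
* PutDataLifecycleResponse resp = indices.putDataLifecycle(d -> d
*     .name("my-data-stream")
*     .dataRetention(t -> t.time("7d")));
* }</pre>
*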
* @see Documentation
* on elastic.co
*/
public PutDataLifecycleResponse putDataLifecycle(PutDataLifecycleRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutDataLifecycleRequest, PutDataLifecycleResponse, ErrorResponse> endpoint = (JsonEndpoint<PutDataLifecycleRequest, PutDataLifecycleResponse, ErrorResponse>) PutDataLifecycleRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Update data stream lifecycles. Update the data stream lifecycle of the
* specified data streams.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutDataLifecycleRequest}
* @see Documentation
* on elastic.co
*/
public final PutDataLifecycleResponse putDataLifecycle(
Function<PutDataLifecycleRequest.Builder, ObjectBuilder<PutDataLifecycleRequest>> fn)
throws IOException, ElasticsearchException {
return putDataLifecycle(fn.apply(new PutDataLifecycleRequest.Builder()).build());
}
// ----- Endpoint: indices.put_index_template
/**
* Create or update an index template. Index templates define settings,
* mappings, and aliases that can be applied automatically to new indices.
*
* Elasticsearch applies templates to new indices based on a wildcard pattern
* that matches the index name. Index templates are applied during data stream
* or index creation. For data streams, these settings and mappings are applied
* when the stream's backing indices are created. Settings and mappings
* specified in a create index API request override any settings or mappings
* specified in an index template. Changes to index templates do not affect
* existing indices, including the existing backing indices of a data stream.
*
* You can use C-style <code>/* *\/</code> block comments in index templates.
* You can include comments anywhere in the request body, except before the
* opening curly bracket.
*
* Multiple matching templates
*
* If multiple index templates match the name of a new index or data stream, the
* template with the highest priority is used.
*
* Multiple templates with overlapping index patterns at the same priority are
* not allowed and an error will be thrown when attempting to create a template
* matching an existing index template at identical priorities.
*
* Composing aliases, mappings, and settings
*
* When multiple component templates are specified in the
* <code>composed_of</code> field for an index template, they are merged in the
* order specified, meaning that later component templates override earlier
* component templates. Any mappings, settings, or aliases from the parent index
* template are merged in next. Finally, any configuration on the index request
* itself is merged. Mapping definitions are merged recursively, which means
* that later mapping components can introduce new field mappings and update the
* mapping configuration. If a field mapping is already contained in an earlier
* component, its definition will be completely overwritten by the later one.
* This recursive merging strategy applies not only to field mappings, but also
* root options like <code>dynamic_templates</code> and <code>meta</code>. If an
* earlier component contains a <code>dynamic_templates</code> block, then by
* default new <code>dynamic_templates</code> entries are appended onto the end.
* If an entry already exists with the same key, then it is overwritten by the
* new definition.
*
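* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code>; the template name, pattern, and mapping are
* illustrative:
*
* <pre>{@code
* PutIndexTemplateResponse resp = indices.putIndexTemplate(t -> t
*     .name("my-template")
*     .indexPatterns("logs-*")
*     .template(tpl -> tpl.mappings(m -> m
*         .properties("message", p -> p.text(txt -> txt)))));
* }</pre>
*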
* @see Documentation
* on elastic.co
*/
public PutIndexTemplateResponse putIndexTemplate(PutIndexTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutIndexTemplateRequest, PutIndexTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<PutIndexTemplateRequest, PutIndexTemplateResponse, ErrorResponse>) PutIndexTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create or update an index template. Index templates define settings,
* mappings, and aliases that can be applied automatically to new indices.
*
* Elasticsearch applies templates to new indices based on a wildcard pattern
* that matches the index name. Index templates are applied during data stream
* or index creation. For data streams, these settings and mappings are applied
* when the stream's backing indices are created. Settings and mappings
* specified in a create index API request override any settings or mappings
* specified in an index template. Changes to index templates do not affect
* existing indices, including the existing backing indices of a data stream.
*
* You can use C-style <code>/* *\/</code> block comments in index templates.
* You can include comments anywhere in the request body, except before the
* opening curly bracket.
*
* Multiple matching templates
*
* If multiple index templates match the name of a new index or data stream, the
* template with the highest priority is used.
*
* Multiple templates with overlapping index patterns at the same priority are
* not allowed and an error will be thrown when attempting to create a template
* matching an existing index template at identical priorities.
*
* Composing aliases, mappings, and settings
*
* When multiple component templates are specified in the
* <code>composed_of</code> field for an index template, they are merged in the
* order specified, meaning that later component templates override earlier
* component templates. Any mappings, settings, or aliases from the parent index
* template are merged in next. Finally, any configuration on the index request
* itself is merged. Mapping definitions are merged recursively, which means
* that later mapping components can introduce new field mappings and update the
* mapping configuration. If a field mapping is already contained in an earlier
* component, its definition will be completely overwritten by the later one.
* This recursive merging strategy applies not only to field mappings, but also
* root options like <code>dynamic_templates</code> and <code>meta</code>. If an
* earlier component contains a <code>dynamic_templates</code> block, then by
* default new <code>dynamic_templates</code> entries are appended onto the end.
* If an entry already exists with the same key, then it is overwritten by the
* new definition.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final PutIndexTemplateResponse putIndexTemplate(
Function<PutIndexTemplateRequest.Builder, ObjectBuilder<PutIndexTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return putIndexTemplate(fn.apply(new PutIndexTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.put_mapping
/**
* Update field mappings. Add new fields to an existing data stream or index.
* You can use the update mapping API to:
*
* - Add a new field to an existing index
* - Update mappings for multiple indices in a single request
* - Add new properties to an object field
* - Enable multi-fields for an existing field
* - Update supported mapping parameters
* - Change a field's mapping using reindexing
* - Rename a field using a field alias
*
*
* Learn how to use the update mapping API with practical examples in the
* Update
* mapping API examples guide.
*
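* A minimal usage sketch for adding a field, assuming an
* {@link ElasticsearchIndicesClient} named <code>indices</code>; the index and
* field names are illustrative:
*
* <pre>{@code
* PutMappingResponse resp = indices.putMapping(m -> m
*     .index("my-index")
*     .properties("user_id", p -> p.keyword(k -> k)));
* }</pre>
*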
* @see Documentation
* on elastic.co
*/
public PutMappingResponse putMapping(PutMappingRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutMappingRequest, PutMappingResponse, ErrorResponse> endpoint = (JsonEndpoint<PutMappingRequest, PutMappingResponse, ErrorResponse>) PutMappingRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Update field mappings. Add new fields to an existing data stream or index.
* You can use the update mapping API to:
*
* - Add a new field to an existing index
* - Update mappings for multiple indices in a single request
* - Add new properties to an object field
* - Enable multi-fields for an existing field
* - Update supported mapping parameters
* - Change a field's mapping using reindexing
* - Rename a field using a field alias
*
*
* Learn how to use the update mapping API with practical examples in the
* Update
* mapping API examples guide.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutMappingRequest}
* @see Documentation
* on elastic.co
*/
public final PutMappingResponse putMapping(Function<PutMappingRequest.Builder, ObjectBuilder<PutMappingRequest>> fn)
throws IOException, ElasticsearchException {
return putMapping(fn.apply(new PutMappingRequest.Builder()).build());
}
// ----- Endpoint: indices.put_settings
/**
* Update index settings. Changes dynamic index settings in real time. For data
* streams, index setting changes are applied to all backing indices by default.
*
* To revert a setting to the default value, use a null value. The list of
* per-index settings that can be updated dynamically on live indices can be
* found in index settings documentation. To preserve existing settings from
* being updated, set the <code>preserve_existing</code> parameter to
* <code>true</code>.
*
* For performance optimization during bulk indexing, you can disable the
* refresh interval. Refer to disable
* refresh interval for an example. There are multiple valid ways to
* represent index settings in the request body. You can specify only the
* setting, for example:
*
* <pre>
* {
*   "number_of_replicas": 1
* }
* </pre>
*
* Or you can use an <code>index</code> setting object:
*
* <pre>
* {
*   "index": {
*     "number_of_replicas": 1
*   }
* }
* </pre>
*
* Or you can use dot notation:
*
* <pre>
* {
*   "index.number_of_replicas": 1
* }
* </pre>
*
* Or you can embed any of the aforementioned options in a <code>settings</code>
* object. For example:
*
* <pre>
* {
*   "settings": {
*     "index": {
*       "number_of_replicas": 1
*     }
*   }
* }
* </pre>
*
* NOTE: You can only define new analyzers on closed indices. To add an
* analyzer, you must close the index, define the analyzer, and reopen the
* index. You cannot close the write index of a data stream. To update the
* analyzer for a data stream's write index and future backing indices, update
* the analyzer in the index template used by the stream. Then roll over the
* data stream to apply the new analyzer to the stream's write index and future
* backing indices. This affects searches and any new data added to the stream
* after the rollover. However, it does not affect the data stream's backing
* indices or their existing data. To change the analyzer for existing backing
* indices, you must create a new data stream and reindex your data into it.
* Refer to updating
* analyzers on existing indices for step-by-step examples.
*
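* A minimal usage sketch that disables the refresh interval, assuming an
* {@link ElasticsearchIndicesClient} named <code>indices</code> and an
* illustrative index name:
*
* <pre>{@code
* PutIndicesSettingsResponse resp = indices.putSettings(s -> s
*     .index("my-index")
*     .settings(st -> st.refreshInterval(t -> t.time("-1"))));
* }</pre>
*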
* @see Documentation
* on elastic.co
*/
public PutIndicesSettingsResponse putSettings(PutIndicesSettingsRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutIndicesSettingsRequest, PutIndicesSettingsResponse, ErrorResponse> endpoint = (JsonEndpoint<PutIndicesSettingsRequest, PutIndicesSettingsResponse, ErrorResponse>) PutIndicesSettingsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Update index settings. Changes dynamic index settings in real time. For data
* streams, index setting changes are applied to all backing indices by default.
*
* To revert a setting to the default value, use a null value. The list of
* per-index settings that can be updated dynamically on live indices can be
* found in index settings documentation. To preserve existing settings from
* being updated, set the <code>preserve_existing</code> parameter to
* <code>true</code>.
*
* For performance optimization during bulk indexing, you can disable the
* refresh interval. Refer to disable
* refresh interval for an example. There are multiple valid ways to
* represent index settings in the request body. You can specify only the
* setting, for example:
*
* <pre>
* {
*   "number_of_replicas": 1
* }
* </pre>
*
* Or you can use an <code>index</code> setting object:
*
* <pre>
* {
*   "index": {
*     "number_of_replicas": 1
*   }
* }
* </pre>
*
* Or you can use dot notation:
*
* <pre>
* {
*   "index.number_of_replicas": 1
* }
* </pre>
*
* Or you can embed any of the aforementioned options in a <code>settings</code>
* object. For example:
*
* <pre>
* {
*   "settings": {
*     "index": {
*       "number_of_replicas": 1
*     }
*   }
* }
* </pre>
*
* NOTE: You can only define new analyzers on closed indices. To add an
* analyzer, you must close the index, define the analyzer, and reopen the
* index. You cannot close the write index of a data stream. To update the
* analyzer for a data stream's write index and future backing indices, update
* the analyzer in the index template used by the stream. Then roll over the
* data stream to apply the new analyzer to the stream's write index and future
* backing indices. This affects searches and any new data added to the stream
* after the rollover. However, it does not affect the data stream's backing
* indices or their existing data. To change the analyzer for existing backing
* indices, you must create a new data stream and reindex your data into it.
* Refer to updating
* analyzers on existing indices for step-by-step examples.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutIndicesSettingsRequest}
* @see Documentation
* on elastic.co
*/
public final PutIndicesSettingsResponse putSettings(
Function<PutIndicesSettingsRequest.Builder, ObjectBuilder<PutIndicesSettingsRequest>> fn)
throws IOException, ElasticsearchException {
return putSettings(fn.apply(new PutIndicesSettingsRequest.Builder()).build());
}
/**
* Update index settings. Changes dynamic index settings in real time. For data
* streams, index setting changes are applied to all backing indices by default.
*
* To revert a setting to the default value, use a null value. The list of
* per-index settings that can be updated dynamically on live indices can be
* found in index settings documentation. To preserve existing settings from
* being updated, set the <code>preserve_existing</code> parameter to
* <code>true</code>.
*
* For performance optimization during bulk indexing, you can disable the
* refresh interval. Refer to disable
* refresh interval for an example. There are multiple valid ways to
* represent index settings in the request body. You can specify only the
* setting, for example:
*
* <pre>
* {
*   "number_of_replicas": 1
* }
* </pre>
*
* Or you can use an <code>index</code> setting object:
*
* <pre>
* {
*   "index": {
*     "number_of_replicas": 1
*   }
* }
* </pre>
*
* Or you can use dot notation:
*
* <pre>
* {
*   "index.number_of_replicas": 1
* }
* </pre>
*
* Or you can embed any of the aforementioned options in a <code>settings</code>
* object. For example:
*
* <pre>
* {
*   "settings": {
*     "index": {
*       "number_of_replicas": 1
*     }
*   }
* }
* </pre>
*
* NOTE: You can only define new analyzers on closed indices. To add an
* analyzer, you must close the index, define the analyzer, and reopen the
* index. You cannot close the write index of a data stream. To update the
* analyzer for a data stream's write index and future backing indices, update
* the analyzer in the index template used by the stream. Then roll over the
* data stream to apply the new analyzer to the stream's write index and future
* backing indices. This affects searches and any new data added to the stream
* after the rollover. However, it does not affect the data stream's backing
* indices or their existing data. To change the analyzer for existing backing
* indices, you must create a new data stream and reindex your data into it.
* Refer to updating
* analyzers on existing indices for step-by-step examples.
*
* @see Documentation
* on elastic.co
*/
public PutIndicesSettingsResponse putSettings() throws IOException, ElasticsearchException {
return this.transport.performRequest(new PutIndicesSettingsRequest.Builder().build(),
PutIndicesSettingsRequest._ENDPOINT, this.transportOptions);
}
// ----- Endpoint: indices.put_template
/**
* Create or update a legacy index template. Index templates define settings,
* mappings, and aliases that can be applied automatically to new indices.
* Elasticsearch applies templates to new indices based on an index pattern that
* matches the index name.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* Composable templates always take precedence over legacy templates. If no
* composable template matches a new index, matching legacy templates are
* applied according to their order.
*
* Index templates are only applied during index creation. Changes to index
* templates do not affect existing indices. Settings and mappings specified in
* create index API requests override any settings or mappings specified in an
* index template.
*
* You can use C-style <code>/* *\/</code> block comments in index templates.
* You can include comments anywhere in the request body, except before the
* opening curly bracket.
*
* Indices matching multiple templates
*
* Multiple index templates can potentially match an index; in this case, both
* the settings and mappings are merged into the final configuration of the
* index. The order of the merging can be controlled using the order parameter,
* with lower order being applied first, and higher orders overriding them.
* NOTE: Multiple matching templates with the same order value will result in a
* non-deterministic merging order.
*
* @see Documentation
* on elastic.co
*/
public PutTemplateResponse putTemplate(PutTemplateRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<PutTemplateRequest, PutTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<PutTemplateRequest, PutTemplateResponse, ErrorResponse>) PutTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Create or update a legacy index template. Index templates define settings,
* mappings, and aliases that can be applied automatically to new indices.
* Elasticsearch applies templates to new indices based on an index pattern that
* matches the index name.
*
* IMPORTANT: This documentation is about legacy index templates, which are
* deprecated and will be replaced by the composable templates introduced in
* Elasticsearch 7.8.
*
* Composable templates always take precedence over legacy templates. If no
* composable template matches a new index, matching legacy templates are
* applied according to their order.
*
* Index templates are only applied during index creation. Changes to index
* templates do not affect existing indices. Settings and mappings specified in
* create index API requests override any settings or mappings specified in an
* index template.
*
* You can use C-style <code>/* *\/</code> block comments in index templates.
* You can include comments anywhere in the request body, except before the
* opening curly bracket.
*
* Indices matching multiple templates
*
* Multiple index templates can potentially match an index; in this case, both
* the settings and mappings are merged into the final configuration of the
* index. The order of the merging can be controlled using the order parameter,
* with lower order being applied first, and higher orders overriding them.
* NOTE: Multiple matching templates with the same order value will result in a
* non-deterministic merging order.
*
* @param fn
* a function that initializes a builder to create the
* {@link PutTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final PutTemplateResponse putTemplate(
Function<PutTemplateRequest.Builder, ObjectBuilder<PutTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return putTemplate(fn.apply(new PutTemplateRequest.Builder()).build());
}
// ----- Endpoint: indices.recovery
/**
* Get index recovery information. Get information about ongoing and completed
* shard recoveries for one or more indices. For data streams, the API returns
* information for the stream's backing indices.
*
* All recoveries, whether ongoing or complete, are kept in the cluster state
* and may be reported on at any time.
*
* Shard recovery is the process of initializing a shard copy, such as restoring
* a primary shard from a snapshot or creating a replica shard from a primary
* shard. When a shard recovery completes, the recovered shard is available for
* search and indexing.
*
* Recovery automatically occurs during the following processes:
*
* - When creating an index for the first time.
* - When a node rejoins the cluster and starts up any missing primary shard
* copies using the data that it holds in its data path.
* - Creation of new replica shard copies from the primary.
* - Relocation of a shard copy to a different node in the same cluster.
* - A snapshot restore operation.
* - A clone, shrink, or split operation.
*
*
* You can determine the cause of a shard recovery using the recovery or cat
* recovery APIs.
*
* The index recovery API reports information about completed recoveries only
* for shard copies that currently exist in the cluster. It only reports the
* last recovery for each shard copy and does not report historical information
* about earlier recoveries, nor does it report information about the recoveries
* of shard copies that no longer exist. This means that if a shard copy
* completes a recovery and then Elasticsearch relocates it onto a different
* node, then the information about the original recovery will not be shown in
* the recovery API.
*
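* A minimal usage sketch that reports only ongoing recoveries, assuming an
* {@link ElasticsearchIndicesClient} named <code>indices</code> and an
* illustrative index name:
*
* <pre>{@code
* RecoveryResponse resp = indices.recovery(r -> r
*     .index("my-index")
*     .activeOnly(true));
* }</pre>
*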
* @see Documentation
* on elastic.co
*/
public RecoveryResponse recovery(RecoveryRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<RecoveryRequest, RecoveryResponse, ErrorResponse> endpoint = (JsonEndpoint<RecoveryRequest, RecoveryResponse, ErrorResponse>) RecoveryRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get index recovery information. Get information about ongoing and completed
* shard recoveries for one or more indices. For data streams, the API returns
* information for the stream's backing indices.
*
* All recoveries, whether ongoing or complete, are kept in the cluster state
* and may be reported on at any time.
*
* Shard recovery is the process of initializing a shard copy, such as restoring
* a primary shard from a snapshot or creating a replica shard from a primary
* shard. When a shard recovery completes, the recovered shard is available for
* search and indexing.
*
* Recovery automatically occurs during the following processes:
*
* - When creating an index for the first time.
* - When a node rejoins the cluster and starts up any missing primary shard
* copies using the data that it holds in its data path.
* - Creation of new replica shard copies from the primary.
* - Relocation of a shard copy to a different node in the same cluster.
* - A snapshot restore operation.
* - A clone, shrink, or split operation.
*
*
* You can determine the cause of a shard recovery using the recovery or cat
* recovery APIs.
*
* The index recovery API reports information about completed recoveries only
* for shard copies that currently exist in the cluster. It only reports the
* last recovery for each shard copy and does not report historical information
* about earlier recoveries, nor does it report information about the recoveries
* of shard copies that no longer exist. This means that if a shard copy
* completes a recovery and then Elasticsearch relocates it onto a different
* node, then the information about the original recovery will not be shown in
* the recovery API.
*
* @param fn
* a function that initializes a builder to create the
* {@link RecoveryRequest}
* @see Documentation
* on elastic.co
*/
public final RecoveryResponse recovery(Function<RecoveryRequest.Builder, ObjectBuilder<RecoveryRequest>> fn)
throws IOException, ElasticsearchException {
return recovery(fn.apply(new RecoveryRequest.Builder()).build());
}
/**
* Get index recovery information. Get information about ongoing and completed
* shard recoveries for one or more indices. For data streams, the API returns
* information for the stream's backing indices.
*
* All recoveries, whether ongoing or complete, are kept in the cluster state
* and may be reported on at any time.
*
* Shard recovery is the process of initializing a shard copy, such as restoring
* a primary shard from a snapshot or creating a replica shard from a primary
* shard. When a shard recovery completes, the recovered shard is available for
* search and indexing.
*
* Recovery automatically occurs during the following processes:
*
* - When creating an index for the first time.
* - When a node rejoins the cluster and starts up any missing primary shard
* copies using the data that it holds in its data path.
* - Creation of new replica shard copies from the primary.
* - Relocation of a shard copy to a different node in the same cluster.
* - A snapshot restore operation.
* - A clone, shrink, or split operation.
*
*
* You can determine the cause of a shard recovery using the recovery or cat
* recovery APIs.
*
* The index recovery API reports information about completed recoveries only
* for shard copies that currently exist in the cluster. It only reports the
* last recovery for each shard copy and does not report historical information
* about earlier recoveries, nor does it report information about the recoveries
* of shard copies that no longer exist. This means that if a shard copy
* completes a recovery and then Elasticsearch relocates it onto a different
* node, then the information about the original recovery will not be shown in
* the recovery API.
*
* @see Documentation
* on elastic.co
*/
public RecoveryResponse recovery() throws IOException, ElasticsearchException {
return this.transport.performRequest(new RecoveryRequest.Builder().build(), RecoveryRequest._ENDPOINT,
this.transportOptions);
}
// ----- Endpoint: indices.refresh
/**
* Refresh an index. A refresh makes recent operations performed on one or more
* indices available for search. For data streams, the API runs the refresh
* operation on the stream’s backing indices.
*
* By default, Elasticsearch periodically refreshes indices every second, but
* only on indices that have received one search request or more in the last 30
* seconds. You can change this default interval with the
* <code>index.refresh_interval</code> setting.
*
* Refresh requests are synchronous and do not return a response until the
* refresh operation completes.
*
* Refreshes are resource-intensive. To ensure good cluster performance, it's
* recommended to wait for Elasticsearch's periodic refresh rather than
* performing an explicit refresh when possible.
*
* If your application workflow indexes documents and then runs a search to
* retrieve the indexed document, it's recommended to use the index API's
* <code>refresh=wait_for</code> query parameter option. This option ensures the
* indexing operation waits for a periodic refresh before running the search.
*
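* A minimal usage sketch, assuming an {@link ElasticsearchIndicesClient} named
* <code>indices</code> and an illustrative index name:
*
* <pre>{@code
* RefreshResponse resp = indices.refresh(r -> r.index("my-index"));
* }</pre>
*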
* @see Documentation
* on elastic.co
*/
public RefreshResponse refresh(RefreshRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<RefreshRequest, RefreshResponse, ErrorResponse> endpoint = (JsonEndpoint<RefreshRequest, RefreshResponse, ErrorResponse>) RefreshRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Refresh an index. A refresh makes recent operations performed on one or more
* indices available for search. For data streams, the API runs the refresh
* operation on the stream’s backing indices.
*
* By default, Elasticsearch periodically refreshes indices every second, but
* only on indices that have received one search request or more in the last 30
* seconds. You can change this default interval with the
* <code>index.refresh_interval</code> setting.
*
* Refresh requests are synchronous and do not return a response until the
* refresh operation completes.
*
* Refreshes are resource-intensive. To ensure good cluster performance, it's
* recommended to wait for Elasticsearch's periodic refresh rather than
* performing an explicit refresh when possible.
*
* If your application workflow indexes documents and then runs a search to
* retrieve the indexed document, it's recommended to use the index API's
* <code>refresh=wait_for</code> query parameter option. This option ensures the
* indexing operation waits for a periodic refresh before running the search.
*
* @param fn
* a function that initializes a builder to create the
* {@link RefreshRequest}
* @see Documentation
* on elastic.co
*/
public final RefreshResponse refresh(Function<RefreshRequest.Builder, ObjectBuilder<RefreshRequest>> fn)
throws IOException, ElasticsearchException {
return refresh(fn.apply(new RefreshRequest.Builder()).build());
}
/**
* Refresh an index. A refresh makes recent operations performed on one or more
* indices available for search. For data streams, the API runs the refresh
* operation on the stream’s backing indices.
*
* By default, Elasticsearch periodically refreshes indices every second, but
* only on indices that have received one search request or more in the last 30
* seconds. You can change this default interval with the
* <code>index.refresh_interval</code> setting.
*
* Refresh requests are synchronous and do not return a response until the
* refresh operation completes.
*
* Refreshes are resource-intensive. To ensure good cluster performance, it's
* recommended to wait for Elasticsearch's periodic refresh rather than
* performing an explicit refresh when possible.
*
* If your application workflow indexes documents and then runs a search to
* retrieve the indexed document, it's recommended to use the index API's
* <code>refresh=wait_for</code> query parameter option. This option ensures the
* indexing operation waits for a periodic refresh before running the search.
*
* @see Documentation
* on elastic.co
*/
public RefreshResponse refresh() throws IOException, ElasticsearchException {
return this.transport.performRequest(new RefreshRequest.Builder().build(), RefreshRequest._ENDPOINT,
this.transportOptions);
}
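// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the index name "my-index" are hypothetical): explicitly refresh one index
// after a bulk load so the new documents become searchable immediately.
//
// RefreshResponse refreshed = esClient.indices().refresh(r -> r.index("my-index"));
// System.out.println("successful shards: " + refreshed.shards().successful());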
// ----- Endpoint: indices.reload_search_analyzers
/**
* Reload search analyzers. Reload an index's search analyzers and their
* resources. For data streams, the API reloads search analyzers and resources
* for the stream's backing indices.
*
* IMPORTANT: After reloading the search analyzers you should clear the request
* cache to make sure it doesn't contain responses derived from the previous
* versions of the analyzer.
*
* You can use the reload search analyzers API to pick up changes to synonym
* files used in the <code>synonym_graph</code> or <code>synonym</code> token
* filter of a search analyzer. To be eligible, the token filter must have an
* <code>updateable</code> flag of <code>true</code> and only be used in search
* analyzers.
*
* NOTE: This API does not perform a reload for each shard of an index. Instead,
* it performs a reload for each node containing index shards. As a result, the
* total shard count returned by the API can differ from the number of index
* shards. Because reloading affects every node with an index shard, it is
* important to update the synonym file on every data node in the
* cluster--including nodes that don't contain a shard replica--before using
* this API. This ensures the synonym file is updated everywhere in the cluster
* in case shards are relocated in the future.
*
* @see Documentation
* on elastic.co
*/
public ReloadSearchAnalyzersResponse reloadSearchAnalyzers(ReloadSearchAnalyzersRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ReloadSearchAnalyzersRequest, ReloadSearchAnalyzersResponse, ErrorResponse> endpoint = (JsonEndpoint<ReloadSearchAnalyzersRequest, ReloadSearchAnalyzersResponse, ErrorResponse>) ReloadSearchAnalyzersRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Reload search analyzers. Reload an index's search analyzers and their
* resources. For data streams, the API reloads search analyzers and resources
* for the stream's backing indices.
*
* IMPORTANT: After reloading the search analyzers you should clear the request
* cache to make sure it doesn't contain responses derived from the previous
* versions of the analyzer.
*
* You can use the reload search analyzers API to pick up changes to synonym
* files used in the <code>synonym_graph</code> or <code>synonym</code> token
* filter of a search analyzer. To be eligible, the token filter must have an
* <code>updateable</code> flag of <code>true</code> and only be used in search
* analyzers.
*
* NOTE: This API does not perform a reload for each shard of an index. Instead,
* it performs a reload for each node containing index shards. As a result, the
* total shard count returned by the API can differ from the number of index
* shards. Because reloading affects every node with an index shard, it is
* important to update the synonym file on every data node in the
* cluster--including nodes that don't contain a shard replica--before using
* this API. This ensures the synonym file is updated everywhere in the cluster
* in case shards are relocated in the future.
*
* @param fn
* a function that initializes a builder to create the
* {@link ReloadSearchAnalyzersRequest}
* @see Documentation
* on elastic.co
*/
public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers(
Function<ReloadSearchAnalyzersRequest.Builder, ObjectBuilder<ReloadSearchAnalyzersRequest>> fn)
throws IOException, ElasticsearchException {
return reloadSearchAnalyzers(fn.apply(new ReloadSearchAnalyzersRequest.Builder()).build());
}
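// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the index name are hypothetical): reload analyzers after updating a synonym
// file on every data node, then clear the request cache as recommended above.
//
// ReloadSearchAnalyzersResponse reloaded =
//     esClient.indices().reloadSearchAnalyzers(r -> r.index("my-index"));
// esClient.indices().clearCache(c -> c.index("my-index").request(true));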
// ----- Endpoint: indices.resolve_cluster
/**
* Resolve the cluster.
*
* Resolve the specified index expressions to return information about each
* cluster, including the local "querying" cluster, if included. If no
* index expression is provided, the API will return information about all the
* remote clusters that are configured on the querying cluster.
*
* This endpoint is useful before doing a cross-cluster search in order to
* determine which remote clusters should be included in a search.
*
* You use the same index expression with this endpoint as you would for
* cross-cluster search. Index and cluster exclusions are also supported with
* this endpoint.
*
* For each cluster in the index expression, information is returned about:
*
* - Whether the querying ("local") cluster is currently connected
* to each remote cluster specified in the index expression. Note that this
* endpoint actively attempts to contact the remote clusters, unlike the
* <code>remote/info</code> endpoint.
* - Whether each remote cluster is configured with
* <code>skip_unavailable</code> as <code>true</code> or
* <code>false</code>.
* - Whether there are any indices, aliases, or data streams on that cluster
* that match the index expression.
* - Whether the search is likely to have errors returned when you do the
* cross-cluster search (including any authorization errors if you do not have
* permission to query the index).
* - Cluster version information, including the Elasticsearch server
* version.
*
*
* For example,
* <code>GET /_resolve/cluster/my-index-*,cluster*:my-index-*</code> returns
* information about the local cluster and all remotely configured clusters that
* start with the alias <code>cluster*</code>. Each cluster returns information
* about whether it has any indices, aliases or data streams that match
* <code>my-index-*</code>.
*
* Note on backwards compatibility
*
* The ability to query without an index expression was added in version 8.18,
* so when querying remote clusters older than that, the local cluster will send
* the index expression <code>dummy*</code> to those remote clusters. Thus, if
* errors occur, you may see a reference to that index expression even though
* you didn't request it. If it causes a problem, you can instead include an
* index expression like <code>*:*</code> to bypass the issue.
*
* Advantages of using this endpoint before a cross-cluster search
*
* You may want to exclude a cluster or index from a search when:
*
* - A remote cluster is not currently connected and is configured with
* <code>skip_unavailable=false</code>. Running a cross-cluster search under
* those conditions will cause the entire search to fail.
* - A cluster has no matching indices, aliases or data streams for the index
* expression (or your user does not have permissions to search them). For
* example, suppose your index expression is <code>logs*,remote1:logs*</code>
* and the remote1 cluster has no indices, aliases or data streams that match
* <code>logs*</code>. In that case, no results will be returned from that
* cluster if you include it in a cross-cluster search.
* - The index expression (combined with any query parameters you specify)
* will likely cause an exception to be thrown when you do the search. In these
* cases, the "error" field in the <code>_resolve/cluster</code>
* response will be present. (This is also where security/permission errors will
* be shown.)
* - A remote cluster is an older version that does not support the feature
* you want to use in your search.
*
* Test availability of remote clusters
*
* The <code>remote/info</code> endpoint is commonly used to test whether the
* "local" cluster (the cluster being queried) is connected to its
* remote clusters, but it does not necessarily reflect whether the remote
* cluster is available or not. The remote cluster may be available, while the
* local cluster is not currently connected to it.
*
* You can use the <code>_resolve/cluster</code> API to attempt to reconnect to
* remote clusters. For example with <code>GET _resolve/cluster</code> or
* <code>GET _resolve/cluster/*:*</code>. The <code>connected</code> field in
* the response will indicate whether it was successful. If a connection was
* (re-)established, this will also cause the <code>remote/info</code> endpoint
* to now indicate a connected status.
*
* @see Documentation
* on elastic.co
*/
public ResolveClusterResponse resolveCluster(ResolveClusterRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ResolveClusterRequest, ResolveClusterResponse, ErrorResponse> endpoint = (JsonEndpoint<ResolveClusterRequest, ResolveClusterResponse, ErrorResponse>) ResolveClusterRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Resolve the cluster.
*
* Resolve the specified index expressions to return information about each
* cluster, including the local "querying" cluster, if included. If no
* index expression is provided, the API will return information about all the
* remote clusters that are configured on the querying cluster.
*
* This endpoint is useful before doing a cross-cluster search in order to
* determine which remote clusters should be included in a search.
*
* You use the same index expression with this endpoint as you would for
* cross-cluster search. Index and cluster exclusions are also supported with
* this endpoint.
*
* For each cluster in the index expression, information is returned about:
*
* - Whether the querying ("local") cluster is currently connected
* to each remote cluster specified in the index expression. Note that this
* endpoint actively attempts to contact the remote clusters, unlike the
* <code>remote/info</code> endpoint.
* - Whether each remote cluster is configured with
* <code>skip_unavailable</code> as <code>true</code> or
* <code>false</code>.
* - Whether there are any indices, aliases, or data streams on that cluster
* that match the index expression.
* - Whether the search is likely to have errors returned when you do the
* cross-cluster search (including any authorization errors if you do not have
* permission to query the index).
* - Cluster version information, including the Elasticsearch server
* version.
*
*
* For example,
* <code>GET /_resolve/cluster/my-index-*,cluster*:my-index-*</code> returns
* information about the local cluster and all remotely configured clusters that
* start with the alias <code>cluster*</code>. Each cluster returns information
* about whether it has any indices, aliases or data streams that match
* <code>my-index-*</code>.
*
* Note on backwards compatibility
*
* The ability to query without an index expression was added in version 8.18,
* so when querying remote clusters older than that, the local cluster will send
* the index expression <code>dummy*</code> to those remote clusters. Thus, if
* errors occur, you may see a reference to that index expression even though
* you didn't request it. If it causes a problem, you can instead include an
* index expression like <code>*:*</code> to bypass the issue.
*
* Advantages of using this endpoint before a cross-cluster search
*
* You may want to exclude a cluster or index from a search when:
*
* - A remote cluster is not currently connected and is configured with
* <code>skip_unavailable=false</code>. Running a cross-cluster search under
* those conditions will cause the entire search to fail.
* - A cluster has no matching indices, aliases or data streams for the index
* expression (or your user does not have permissions to search them). For
* example, suppose your index expression is <code>logs*,remote1:logs*</code>
* and the remote1 cluster has no indices, aliases or data streams that match
* <code>logs*</code>. In that case, no results will be returned from that
* cluster if you include it in a cross-cluster search.
* - The index expression (combined with any query parameters you specify)
* will likely cause an exception to be thrown when you do the search. In these
* cases, the "error" field in the <code>_resolve/cluster</code>
* response will be present. (This is also where security/permission errors will
* be shown.)
* - A remote cluster is an older version that does not support the feature
* you want to use in your search.
*
* Test availability of remote clusters
*
* The <code>remote/info</code> endpoint is commonly used to test whether the
* "local" cluster (the cluster being queried) is connected to its
* remote clusters, but it does not necessarily reflect whether the remote
* cluster is available or not. The remote cluster may be available, while the
* local cluster is not currently connected to it.
*
* You can use the <code>_resolve/cluster</code> API to attempt to reconnect to
* remote clusters. For example with <code>GET _resolve/cluster</code> or
* <code>GET _resolve/cluster/*:*</code>. The <code>connected</code> field in
* the response will indicate whether it was successful. If a connection was
* (re-)established, this will also cause the <code>remote/info</code> endpoint
* to now indicate a connected status.
*
* @param fn
* a function that initializes a builder to create the
* {@link ResolveClusterRequest}
* @see Documentation
* on elastic.co
*/
public final ResolveClusterResponse resolveCluster(
Function<ResolveClusterRequest.Builder, ObjectBuilder<ResolveClusterRequest>> fn)
throws IOException, ElasticsearchException {
return resolveCluster(fn.apply(new ResolveClusterRequest.Builder()).build());
}
/**
* Resolve the cluster.
*
* Resolve the specified index expressions to return information about each
* cluster, including the local "querying" cluster, if included. If no
* index expression is provided, the API will return information about all the
* remote clusters that are configured on the querying cluster.
*
* This endpoint is useful before doing a cross-cluster search in order to
* determine which remote clusters should be included in a search.
*
* You use the same index expression with this endpoint as you would for
* cross-cluster search. Index and cluster exclusions are also supported with
* this endpoint.
*
* For each cluster in the index expression, information is returned about:
*
* - Whether the querying ("local") cluster is currently connected
* to each remote cluster specified in the index expression. Note that this
* endpoint actively attempts to contact the remote clusters, unlike the
* <code>remote/info</code> endpoint.
* - Whether each remote cluster is configured with
* <code>skip_unavailable</code> as <code>true</code> or
* <code>false</code>.
* - Whether there are any indices, aliases, or data streams on that cluster
* that match the index expression.
* - Whether the search is likely to have errors returned when you do the
* cross-cluster search (including any authorization errors if you do not have
* permission to query the index).
* - Cluster version information, including the Elasticsearch server
* version.
*
*
* For example,
* <code>GET /_resolve/cluster/my-index-*,cluster*:my-index-*</code> returns
* information about the local cluster and all remotely configured clusters that
* start with the alias <code>cluster*</code>. Each cluster returns information
* about whether it has any indices, aliases or data streams that match
* <code>my-index-*</code>.
*
* Note on backwards compatibility
*
* The ability to query without an index expression was added in version 8.18,
* so when querying remote clusters older than that, the local cluster will send
* the index expression <code>dummy*</code> to those remote clusters. Thus, if
* errors occur, you may see a reference to that index expression even though
* you didn't request it. If it causes a problem, you can instead include an
* index expression like <code>*:*</code> to bypass the issue.
*
* Advantages of using this endpoint before a cross-cluster search
*
* You may want to exclude a cluster or index from a search when:
*
* - A remote cluster is not currently connected and is configured with
* <code>skip_unavailable=false</code>. Running a cross-cluster search under
* those conditions will cause the entire search to fail.
* - A cluster has no matching indices, aliases or data streams for the index
* expression (or your user does not have permissions to search them). For
* example, suppose your index expression is <code>logs*,remote1:logs*</code>
* and the remote1 cluster has no indices, aliases or data streams that match
* <code>logs*</code>. In that case, no results will be returned from that
* cluster if you include it in a cross-cluster search.
* - The index expression (combined with any query parameters you specify)
* will likely cause an exception to be thrown when you do the search. In these
* cases, the "error" field in the <code>_resolve/cluster</code>
* response will be present. (This is also where security/permission errors will
* be shown.)
* - A remote cluster is an older version that does not support the feature
* you want to use in your search.
*
* Test availability of remote clusters
*
* The <code>remote/info</code> endpoint is commonly used to test whether the
* "local" cluster (the cluster being queried) is connected to its
* remote clusters, but it does not necessarily reflect whether the remote
* cluster is available or not. The remote cluster may be available, while the
* local cluster is not currently connected to it.
*
* You can use the <code>_resolve/cluster</code> API to attempt to reconnect to
* remote clusters. For example with <code>GET _resolve/cluster</code> or
* <code>GET _resolve/cluster/*:*</code>. The <code>connected</code> field in
* the response will indicate whether it was successful. If a connection was
* (re-)established, this will also cause the <code>remote/info</code> endpoint
* to now indicate a connected status.
*
* @see Documentation
* on elastic.co
*/
public ResolveClusterResponse resolveCluster() throws IOException, ElasticsearchException {
return this.transport.performRequest(new ResolveClusterRequest.Builder().build(),
ResolveClusterRequest._ENDPOINT, this.transportOptions);
}
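// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the index expressions are hypothetical): before a cross-cluster search, check
// which configured remote clusters are connected and have matching indices.
//
// ResolveClusterResponse resolved =
//     esClient.indices().resolveCluster(r -> r.name("my-index-*", "cluster*:my-index-*"));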
// ----- Endpoint: indices.resolve_index
/**
* Resolve indices. Resolve the names and/or index patterns for indices,
* aliases, and data streams. Multiple patterns and remote clusters are
* supported.
*
* @see Documentation
* on elastic.co
*/
public ResolveIndexResponse resolveIndex(ResolveIndexRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ResolveIndexRequest, ResolveIndexResponse, ErrorResponse> endpoint = (JsonEndpoint<ResolveIndexRequest, ResolveIndexResponse, ErrorResponse>) ResolveIndexRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Resolve indices. Resolve the names and/or index patterns for indices,
* aliases, and data streams. Multiple patterns and remote clusters are
* supported.
*
* @param fn
* a function that initializes a builder to create the
* {@link ResolveIndexRequest}
* @see Documentation
* on elastic.co
*/
public final ResolveIndexResponse resolveIndex(
Function<ResolveIndexRequest.Builder, ObjectBuilder<ResolveIndexRequest>> fn)
throws IOException, ElasticsearchException {
return resolveIndex(fn.apply(new ResolveIndexRequest.Builder()).build());
}
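// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the pattern "logs-*" are hypothetical): resolve a wildcard expression to the
// concrete indices, aliases, and data streams it matches.
//
// ResolveIndexResponse resolved = esClient.indices().resolveIndex(r -> r.name("logs-*"));
// resolved.indices().forEach(i -> System.out.println(i.name()));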
// ----- Endpoint: indices.rollover
/**
* Roll over to a new index. TIP: It is recommended to use the index lifecycle
* rollover action to automate rollovers.
*
* The rollover API creates a new index for a data stream or index alias. The
* API behavior depends on the rollover target.
*
* Roll over a data stream
*
* If you roll over a data stream, the API creates a new write index for the
* stream. The stream's previous write index becomes a regular backing index. A
* rollover also increments the data stream's generation.
*
* Roll over an index alias with a write index
*
* TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
* write index to manage time series data. Data streams replace this
* functionality, require less maintenance, and automatically integrate with
* data tiers.
*
* If an index alias points to multiple indices, one of the indices must be a
* write index. The rollover API creates a new write index for the alias with
* <code>is_write_index</code> set to <code>true</code>. The API also
* sets <code>is_write_index</code> to <code>false</code> for the previous write
* index.
*
* Roll over an index alias with one index
*
* If you roll over an index alias that points to only one index, the API
* creates a new index for the alias and removes the original index from the
* alias.
*
* NOTE: A rollover creates a new index and is subject to the
* <code>wait_for_active_shards</code> setting.
*
* Increment index names for an alias
*
* When you roll over an index alias, you can specify a name for the new index.
* If you don't specify a name and the current index ends with <code>-</code>
* and a number, such as <code>my-index-000001</code> or
* <code>my-index-3</code>, the new index name increments that number. For
* example, if you roll over an alias with a current index of
* <code>my-index-000001</code>, the rollover creates a new index named
* <code>my-index-000002</code>. This number is always six characters and
* zero-padded, regardless of the previous index's name.
*
* If you use an index alias for time series data, you can use date math in the
* index name to track the rollover date. For example, you can create an alias
* that points to an index named <code>&lt;my-index-{now/d}-000001&gt;</code>.
* If you create the index on May 6, 2099, the index's name is
* <code>my-index-2099.05.06-000001</code>. If you roll over the alias on May 7,
* 2099, the new index's name is <code>my-index-2099.05.07-000002</code>.
*
* @see Documentation
* on elastic.co
*/
public RolloverResponse rollover(RolloverRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<RolloverRequest, RolloverResponse, ErrorResponse> endpoint = (JsonEndpoint<RolloverRequest, RolloverResponse, ErrorResponse>) RolloverRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Roll over to a new index. TIP: It is recommended to use the index lifecycle
* rollover action to automate rollovers.
*
* The rollover API creates a new index for a data stream or index alias. The
* API behavior depends on the rollover target.
*
* Roll over a data stream
*
* If you roll over a data stream, the API creates a new write index for the
* stream. The stream's previous write index becomes a regular backing index. A
* rollover also increments the data stream's generation.
*
* Roll over an index alias with a write index
*
* TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
* write index to manage time series data. Data streams replace this
* functionality, require less maintenance, and automatically integrate with
* data tiers.
*
* If an index alias points to multiple indices, one of the indices must be a
* write index. The rollover API creates a new write index for the alias with
* <code>is_write_index</code> set to <code>true</code>. The API also
* sets <code>is_write_index</code> to <code>false</code> for the previous write
* index.
*
* Roll over an index alias with one index
*
* If you roll over an index alias that points to only one index, the API
* creates a new index for the alias and removes the original index from the
* alias.
*
* NOTE: A rollover creates a new index and is subject to the
* <code>wait_for_active_shards</code> setting.
*
* Increment index names for an alias
*
* When you roll over an index alias, you can specify a name for the new index.
* If you don't specify a name and the current index ends with <code>-</code>
* and a number, such as <code>my-index-000001</code> or
* <code>my-index-3</code>, the new index name increments that number. For
* example, if you roll over an alias with a current index of
* <code>my-index-000001</code>, the rollover creates a new index named
* <code>my-index-000002</code>. This number is always six characters and
* zero-padded, regardless of the previous index's name.
*
* If you use an index alias for time series data, you can use date math in the
* index name to track the rollover date. For example, you can create an alias
* that points to an index named <code>&lt;my-index-{now/d}-000001&gt;</code>.
* If you create the index on May 6, 2099, the index's name is
* <code>my-index-2099.05.06-000001</code>. If you roll over the alias on May 7,
* 2099, the new index's name is <code>my-index-2099.05.07-000002</code>.
*
* @param fn
* a function that initializes a builder to create the
* {@link RolloverRequest}
* @see Documentation
* on elastic.co
*/
public final RolloverResponse rollover(Function<RolloverRequest.Builder, ObjectBuilder<RolloverRequest>> fn)
throws IOException, ElasticsearchException {
return rollover(fn.apply(new RolloverRequest.Builder()).build());
}
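// Usage sketch (illustrative, not part of the generated client; `esClient`, the
// alias name, and the document threshold are hypothetical): roll an alias over
// to a new write index, guarded by a condition so the rollover only happens
// when the threshold is met.
//
// RolloverResponse rolled = esClient.indices().rollover(r -> r
//     .alias("logs-alias")
//     .conditions(c -> c.maxDocs(10_000_000L)));
// System.out.println("rolled over: " + rolled.rolledOver() + ", new index: " + rolled.newIndex());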
// ----- Endpoint: indices.segments
/**
* Get index segments. Get low-level information about the Lucene segments in
* index shards. For data streams, the API returns information about the
* stream's backing indices.
*
* @see Documentation
* on elastic.co
*/
public SegmentsResponse segments(SegmentsRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<SegmentsRequest, SegmentsResponse, ErrorResponse> endpoint = (JsonEndpoint<SegmentsRequest, SegmentsResponse, ErrorResponse>) SegmentsRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get index segments. Get low-level information about the Lucene segments in
* index shards. For data streams, the API returns information about the
* stream's backing indices.
*
* @param fn
* a function that initializes a builder to create the
* {@link SegmentsRequest}
* @see Documentation
* on elastic.co
*/
public final SegmentsResponse segments(Function<SegmentsRequest.Builder, ObjectBuilder<SegmentsRequest>> fn)
throws IOException, ElasticsearchException {
return segments(fn.apply(new SegmentsRequest.Builder()).build());
}
/**
* Get index segments. Get low-level information about the Lucene segments in
* index shards. For data streams, the API returns information about the
* stream's backing indices.
*
* @see Documentation
* on elastic.co
*/
public SegmentsResponse segments() throws IOException, ElasticsearchException {
return this.transport.performRequest(new SegmentsRequest.Builder().build(), SegmentsRequest._ENDPOINT,
this.transportOptions);
}
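// Usage sketch (illustrative, not part of the generated client; `esClient` is
// hypothetical): list per-index segment information, for example to judge
// whether a force-merge is worthwhile.
//
// SegmentsResponse segs = esClient.indices().segments();
// segs.indices().forEach((indexName, segment) -> System.out.println(indexName));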
// ----- Endpoint: indices.shard_stores
/**
* Get index shard stores. Get store information about replica shards in one or
* more indices. For data streams, the API retrieves store information for the
* stream's backing indices.
*
* The index shard stores API returns the following information:
*
* - The node on which each replica shard exists.
* - The allocation ID for each replica shard.
* - A unique ID for each replica shard.
* - Any errors encountered while opening the shard index or from an earlier
* failure.
*
*
* By default, the API returns store information only for primary shards that
* are unassigned or have one or more unassigned replica shards.
*
* @see Documentation
* on elastic.co
*/
public ShardStoresResponse shardStores(ShardStoresRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ShardStoresRequest, ShardStoresResponse, ErrorResponse> endpoint = (JsonEndpoint<ShardStoresRequest, ShardStoresResponse, ErrorResponse>) ShardStoresRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Get index shard stores. Get store information about replica shards in one or
* more indices. For data streams, the API retrieves store information for the
* stream's backing indices.
*
* The index shard stores API returns the following information:
*
* - The node on which each replica shard exists.
* - The allocation ID for each replica shard.
* - A unique ID for each replica shard.
* - Any errors encountered while opening the shard index or from an earlier
* failure.
*
*
* By default, the API returns store information only for primary shards that
* are unassigned or have one or more unassigned replica shards.
*
* @param fn
* a function that initializes a builder to create the
* {@link ShardStoresRequest}
* @see Documentation
* on elastic.co
*/
public final ShardStoresResponse shardStores(
Function<ShardStoresRequest.Builder, ObjectBuilder<ShardStoresRequest>> fn)
throws IOException, ElasticsearchException {
return shardStores(fn.apply(new ShardStoresRequest.Builder()).build());
}
/**
* Get index shard stores. Get store information about replica shards in one or
* more indices. For data streams, the API retrieves store information for the
* stream's backing indices.
*
* The index shard stores API returns the following information:
*
* - The node on which each replica shard exists.
* - The allocation ID for each replica shard.
* - A unique ID for each replica shard.
* - Any errors encountered while opening the shard index or from an earlier
* failure.
*
*
* By default, the API returns store information only for primary shards that
* are unassigned or have one or more unassigned replica shards.
*
* @see Documentation
* on elastic.co
*/
public ShardStoresResponse shardStores() throws IOException, ElasticsearchException {
return this.transport.performRequest(new ShardStoresRequest.Builder().build(), ShardStoresRequest._ENDPOINT,
this.transportOptions);
}
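// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the index name are hypothetical): inspect shard store information for an
// index with unassigned shards to find which nodes hold copies.
//
// ShardStoresResponse stores = esClient.indices().shardStores(s -> s.index("my-index"));
// stores.indices().forEach((indexName, shardStore) -> System.out.println(indexName));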
// ----- Endpoint: indices.shrink
/**
* Shrink an index. Shrink an index into a new index with fewer primary shards.
*
* Before you can shrink an index:
*
* - The index must be read-only.
* - A copy of every shard in the index must reside on the same node.
* - The index must have a green health status.
*
*
* To make shard allocation easier, we recommend you also remove the index's
* replica shards. You can later re-add replica shards as part of the shrink
* operation.
*
* The requested number of primary shards in the target index must be a factor
* of the number of shards in the source index. For example, an index with 8
* primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with
* 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in
* the index is a prime number, it can only be shrunk into a single primary shard.
* Before shrinking, a (primary or replica) copy of every shard in the index
* must be present on the same node.
*
* The current write index on a data stream cannot be shrunk. In order to shrink
* the current write index, the data stream must first be rolled over so that a
* new write index is created and then the previous write index can be shrunk.
*
* A shrink operation:
*
* - Creates a new target index with the same definition as the source index,
* but with a smaller number of primary shards.
* - Hard-links segments from the source index into the target index. If the
* file system does not support hard-linking, then all segments are copied into
* the new index, which is a much more time consuming process. Also if using
* multiple data paths, shards on different data paths require a full copy of
* segment files if they are not on the same disk since hardlinks do not work
* across disks.
* - Recovers the target index as though it were a closed index which had just
* been re-opened. Recovers shards to the
* <code>.routing.allocation.initial_recovery._id</code> index setting.
*
*
* IMPORTANT: Indices can only be shrunk if they satisfy the following
* requirements:
*
* - The target index must not exist.
* - The source index must have more primary shards than the target
* index.
* - The number of primary shards in the target index must be a factor of the
* number of primary shards in the source index.
* - The index must not contain more than 2,147,483,519 documents in total
* across all shards that will be shrunk into a single shard on the target index
* as this is the maximum number of docs that can fit into a single shard.
* - The node handling the shrink process must have sufficient free disk space
* to accommodate a second copy of the existing index.
*
*
* @see Documentation
* on elastic.co
*/
public ShrinkResponse shrink(ShrinkRequest request) throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<ShrinkRequest, ShrinkResponse, ErrorResponse> endpoint = (JsonEndpoint<ShrinkRequest, ShrinkResponse, ErrorResponse>) ShrinkRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Shrink an index. Shrink an index into a new index with fewer primary shards.
*
* Before you can shrink an index:
*
* - The index must be read-only.
* - A copy of every shard in the index must reside on the same node.
* - The index must have a green health status.
*
*
* To make shard allocation easier, we recommend you also remove the index's
* replica shards. You can later re-add replica shards as part of the shrink
* operation.
*
* The requested number of primary shards in the target index must be a factor
* of the number of shards in the source index. For example, an index with 8
* primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with
* 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in
* the index is a prime number, it can only be shrunk into a single primary shard.
* Before shrinking, a (primary or replica) copy of every shard in the index
* must be present on the same node.
*
* The current write index on a data stream cannot be shrunk. In order to shrink
* the current write index, the data stream must first be rolled over so that a
* new write index is created and then the previous write index can be shrunk.
*
* A shrink operation:
*
* - Creates a new target index with the same definition as the source index,
* but with a smaller number of primary shards.
* - Hard-links segments from the source index into the target index. If the
* file system does not support hard-linking, then all segments are copied into
* the new index, which is a much more time consuming process. Also if using
* multiple data paths, shards on different data paths require a full copy of
* segment files if they are not on the same disk since hardlinks do not work
* across disks.
* - Recovers the target index as though it were a closed index which had just
* been re-opened. Recovers shards to the
* <code>.routing.allocation.initial_recovery._id</code> index setting.
*
*
* IMPORTANT: Indices can only be shrunk if they satisfy the following
* requirements:
*
* - The target index must not exist.
* - The source index must have more primary shards than the target
* index.
* - The number of primary shards in the target index must be a factor of the
* number of primary shards in the source index.
* - The index must not contain more than 2,147,483,519 documents in total
* across all shards that will be shrunk into a single shard on the target index
* as this is the maximum number of docs that can fit into a single shard.
* - The node handling the shrink process must have sufficient free disk space
* to accommodate a second copy of the existing index.
*
*
* @param fn
* a function that initializes a builder to create the
* {@link ShrinkRequest}
* @see Documentation
* on elastic.co
*/
public final ShrinkResponse shrink(Function<ShrinkRequest.Builder, ObjectBuilder<ShrinkRequest>> fn)
throws IOException, ElasticsearchException {
return shrink(fn.apply(new ShrinkRequest.Builder()).build());
}
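// Usage sketch (illustrative, not part of the generated client; `esClient` and
// both index names are hypothetical): shrink a read-only source index into a
// target index. The source must first be made read-only (for example with a
// write block) and have a copy of every shard on one node, as described above.
//
// ShrinkResponse shrunk = esClient.indices().shrink(s -> s
//     .index("my-source-index")
//     .target("my-target-index"));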
// ----- Endpoint: indices.simulate_index_template
/**
* Simulate an index. Get the index configuration that would be applied to the
* specified index from an existing index template.
*
* @see Documentation
* on elastic.co
*/
public SimulateIndexTemplateResponse simulateIndexTemplate(SimulateIndexTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<SimulateIndexTemplateRequest, SimulateIndexTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<SimulateIndexTemplateRequest, SimulateIndexTemplateResponse, ErrorResponse>) SimulateIndexTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Simulate an index. Get the index configuration that would be applied to the
* specified index from an existing index template.
*
* @param fn
* a function that initializes a builder to create the
* {@link SimulateIndexTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final SimulateIndexTemplateResponse simulateIndexTemplate(
Function<SimulateIndexTemplateRequest.Builder, ObjectBuilder<SimulateIndexTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return simulateIndexTemplate(fn.apply(new SimulateIndexTemplateRequest.Builder()).build());
}
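// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the index name are hypothetical): preview the settings, mappings, and aliases
// that would be applied to a not-yet-created index name.
//
// SimulateIndexTemplateResponse preview =
//     esClient.indices().simulateIndexTemplate(t -> t.name("logs-2099.05.06"));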
// ----- Endpoint: indices.simulate_template
/**
* Simulate an index template. Get the index configuration that would be applied
* by a particular index template.
*
* @see Documentation
* on elastic.co
*/
public SimulateTemplateResponse simulateTemplate(SimulateTemplateRequest request)
throws IOException, ElasticsearchException {
@SuppressWarnings("unchecked")
JsonEndpoint<SimulateTemplateRequest, SimulateTemplateResponse, ErrorResponse> endpoint = (JsonEndpoint<SimulateTemplateRequest, SimulateTemplateResponse, ErrorResponse>) SimulateTemplateRequest._ENDPOINT;
return this.transport.performRequest(request, endpoint, this.transportOptions);
}
/**
* Simulate an index template. Get the index configuration that would be applied
* by a particular index template.
*
* @param fn
* a function that initializes a builder to create the
* {@link SimulateTemplateRequest}
* @see Documentation
* on elastic.co
*/
public final SimulateTemplateResponse simulateTemplate(
Function<SimulateTemplateRequest.Builder, ObjectBuilder<SimulateTemplateRequest>> fn)
throws IOException, ElasticsearchException {
return simulateTemplate(fn.apply(new SimulateTemplateRequest.Builder()).build());
}
/**
* Simulate an index template. Get the index configuration that would be applied
* by a particular index template.
*
* @see Documentation
* on elastic.co
*/
public SimulateTemplateResponse simulateTemplate() throws IOException, ElasticsearchException {
return this.transport.performRequest(new SimulateTemplateRequest.Builder().build(),
SimulateTemplateRequest._ENDPOINT, this.transportOptions);
}
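// Usage sketch (illustrative, not part of the generated client; `esClient` and
// the template name are hypothetical): preview the composite configuration a
// particular index template would produce.
//
// SimulateTemplateResponse preview =
//     esClient.indices().simulateTemplate(t -> t.name("my-template"));
// System.out.println(preview.template().settings());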
// ----- Endpoint: indices.split
/**
* Split an index. Split an index into a new index with more primary shards.
*
*