/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.cluster.metadata;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.opensearch.LegacyESVersion;
import org.opensearch.OpenSearchException;
import org.opensearch.ResourceAlreadyExistsException;
import org.opensearch.Version;
import org.opensearch.action.admin.indices.alias.Alias;
import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
import org.opensearch.action.admin.indices.shrink.ResizeType;
import org.opensearch.action.support.ActiveShardCount;
import org.opensearch.action.support.ActiveShardsObserver;
import org.opensearch.cluster.AckedClusterStateUpdateTask;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ack.ClusterStateUpdateResponse;
import org.opensearch.cluster.ack.CreateIndexClusterStateUpdateResponse;
import org.opensearch.cluster.block.ClusterBlock;
import org.opensearch.cluster.block.ClusterBlockLevel;
import org.opensearch.cluster.block.ClusterBlocks;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.cluster.routing.IndexRoutingTable;
import org.opensearch.cluster.routing.RoutingTable;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.allocation.AllocationService;
import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance;
import org.opensearch.cluster.service.ClusterManagerTaskKeys;
import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.Nullable;
import org.opensearch.common.Priority;
import org.opensearch.common.UUIDs;
import org.opensearch.common.ValidationException;
import org.opensearch.common.compress.CompressedXContent;
import org.opensearch.common.io.PathUtils;
import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.IndexScopedSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.xcontent.XContentHelper;
import org.opensearch.core.action.ActionListener;
import org.opensearch.core.common.Strings;
import org.opensearch.core.index.Index;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexModule;
import org.opensearch.index.IndexNotFoundException;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.mapper.DocumentMapper;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.MapperService.MergeReason;
import org.opensearch.index.query.QueryShardContext;
import org.opensearch.index.shard.IndexSettingProvider;
import org.opensearch.index.translog.Translog;
import org.opensearch.indices.IndexCreationException;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.InvalidIndexNameException;
import org.opensearch.indices.ShardLimitValidator;
import org.opensearch.indices.SystemIndices;
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.node.Node;
import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
import org.opensearch.threadpool.ThreadPool;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.file.Path;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static java.util.stream.Collectors.toList;
import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING;
import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE;
import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent;
/**
* Service responsible for submitting create index requests
*
* @opensearch.internal
*/
public class MetadataCreateIndexService {
private static final Logger logger = LogManager.getLogger(MetadataCreateIndexService.class);
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(MetadataCreateIndexService.class);
public static final int MAX_INDEX_NAME_BYTES = 255;
private final Settings settings;
private final ClusterService clusterService;
private final IndicesService indicesService;
private final AllocationService allocationService;
private final AliasValidator aliasValidator;
private final Environment env;
private final IndexScopedSettings indexScopedSettings;
private final ActiveShardsObserver activeShardsObserver;
private final NamedXContentRegistry xContentRegistry;
private final SystemIndices systemIndices;
private final ShardLimitValidator shardLimitValidator;
private final boolean forbidPrivateIndexSettings;
private final Set<IndexSettingProvider> indexSettingProviders = new HashSet<>();
private final ClusterManagerTaskThrottler.ThrottlingKey createIndexTaskKey;
private AwarenessReplicaBalance awarenessReplicaBalance;
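/**
* Creates the service with its cluster, allocation, validation, and settings dependencies, and registers the
* create-index task key so that create-index cluster state updates can be throttled by the cluster manager.
*/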
public MetadataCreateIndexService(
final Settings settings,
final ClusterService clusterService,
final IndicesService indicesService,
final AllocationService allocationService,
final AliasValidator aliasValidator,
final ShardLimitValidator shardLimitValidator,
final Environment env,
final IndexScopedSettings indexScopedSettings,
final ThreadPool threadPool,
final NamedXContentRegistry xContentRegistry,
final SystemIndices systemIndices,
final boolean forbidPrivateIndexSettings,
final AwarenessReplicaBalance awarenessReplicaBalance
) {
this.settings = settings;
this.clusterService = clusterService;
this.indicesService = indicesService;
this.allocationService = allocationService;
this.aliasValidator = aliasValidator;
this.env = env;
this.indexScopedSettings = indexScopedSettings;
this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool);
this.xContentRegistry = xContentRegistry;
this.systemIndices = systemIndices;
this.forbidPrivateIndexSettings = forbidPrivateIndexSettings;
this.shardLimitValidator = shardLimitValidator;
this.awarenessReplicaBalance = awarenessReplicaBalance;
// Task is onboarded for throttling; it will get retried from the associated TransportClusterManagerNodeAction.
createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true);
}
/**
* Add a provider to be invoked to get additional index settings prior to an index being created
*/
public void addAdditionalIndexSettingProvider(IndexSettingProvider provider) {
if (provider == null) {
throw new IllegalArgumentException("provider must not be null");
}
if (indexSettingProviders.contains(provider)) {
throw new IllegalArgumentException("provider already added");
}
this.indexSettingProviders.add(provider);
}
/**
* Validate the name for an index against some static rules and a cluster state.
*/
public void validateIndexName(String index, ClusterState state) {
validateIndexOrAliasName(index, InvalidIndexNameException::new);
if (!index.toLowerCase(Locale.ROOT).equals(index)) {
throw new InvalidIndexNameException(index, "must be lowercase");
}
// NOTE: dot-prefixed index names are validated after template application, not here
if (state.routingTable().hasIndex(index)) {
throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex());
}
if (state.metadata().hasIndex(index)) {
throw new ResourceAlreadyExistsException(state.metadata().index(index).getIndex());
}
if (state.metadata().hasAlias(index)) {
throw new InvalidIndexNameException(index, "already exists as alias");
}
}
/**
* Validates (if this index has a dot-prefixed name) whether it follows the rules for dot-prefixed indices.
* @param index The name of the index in question
* @param isHidden Whether or not this is a hidden index
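* @return {@code true} if the name identifies a valid system index, {@code false} otherwise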
*/
public boolean validateDotIndex(String index, @Nullable Boolean isHidden) {
if (index.charAt(0) == '.') {
if (systemIndices.validateSystemIndex(index)) {
return true;
} else if (Boolean.TRUE.equals(isHidden)) { // avoid NPE when the nullable isHidden flag is absent
logger.trace("index [{}] is a hidden index", index);
} else {
DEPRECATION_LOGGER.deprecate(
"index_name_starts_with_dot",
"index name [{}] starts with a dot '.', in the next major version, index names "
+ "starting with a dot are reserved for hidden indices and system indices",
index
);
}
}
return false;
}
/**
* Validate the name for an index or alias against some static rules.
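* <p>For example (illustrative only), {@code validateIndexOrAliasName("logs:2024", InvalidIndexNameException::new)}
* fails because the name contains {@code ':'}, while {@code "logs-2024"} passes these static checks.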
*/
public static void validateIndexOrAliasName(String index, BiFunction<String, String, ? extends RuntimeException> exceptionCtor) {
if (Strings.validFileName(index) == false) {
throw exceptionCtor.apply(index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
if (index.isEmpty()) {
throw exceptionCtor.apply(index, "must not be empty");
}
if (index.contains("#")) {
throw exceptionCtor.apply(index, "must not contain '#'");
}
if (index.contains(":")) {
throw exceptionCtor.apply(index, "must not contain ':'");
}
if (index.charAt(0) == '_' || index.charAt(0) == '-' || index.charAt(0) == '+') {
throw exceptionCtor.apply(index, "must not start with '_', '-', or '+'");
}
int byteCount = 0;
try {
byteCount = index.getBytes("UTF-8").length;
} catch (UnsupportedEncodingException e) {
// UTF-8 should always be supported, but rethrow this if it is not for some reason
throw new OpenSearchException("Unable to determine length of index name", e);
}
if (byteCount > MAX_INDEX_NAME_BYTES) {
throw exceptionCtor.apply(index, "index name is too long, (" + byteCount + " > " + MAX_INDEX_NAME_BYTES + ")");
}
if (index.equals(".") || index.equals("..")) {
throw exceptionCtor.apply(index, "must not be '.' or '..'");
}
}
/**
* Creates an index in the cluster state and waits for the specified number of shard copies to
* become active (as specified in {@link CreateIndexClusterStateUpdateRequest#waitForActiveShards()})
* before sending the response on the listener. If the index creation was successfully applied on
* the cluster state, then {@link CreateIndexClusterStateUpdateResponse#isAcknowledged()} will return
* true, otherwise it will return false and no waiting will occur for started shards
* ({@link CreateIndexClusterStateUpdateResponse#isShardsAcknowledged()} will also be false). If the index
* creation in the cluster state was successful and the requisite shard copies were started before
* the timeout, then {@link CreateIndexClusterStateUpdateResponse#isShardsAcknowledged()} will
* return true, otherwise if the operation timed out, then it will return false.
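*
* <p>Illustrative usage sketch only (the {@code createIndexService} handle and the argument values are hypothetical):
* <pre>{@code
* CreateIndexClusterStateUpdateRequest request =
*     new CreateIndexClusterStateUpdateRequest("api", "my-index", "my-index")
*         .ackTimeout(TimeValue.timeValueSeconds(30))
*         .waitForActiveShards(ActiveShardCount.DEFAULT);
* createIndexService.createIndex(request, ActionListener.wrap(
*     response -> logger.info("acknowledged [{}], shards acknowledged [{}]",
*         response.isAcknowledged(), response.isShardsAcknowledged()),
*     e -> logger.warn("index creation failed", e)));
* }</pre>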
*
* @param request the index creation cluster state update request
* @param listener the listener on which to send the index creation cluster state update response
*/
public void createIndex(
final CreateIndexClusterStateUpdateRequest request,
final ActionListener<CreateIndexClusterStateUpdateResponse> listener
) {
onlyCreateIndex(request, ActionListener.wrap(response -> {
if (response.isAcknowledged()) {
activeShardsObserver.waitForActiveShards(
new String[] { request.index() },
request.waitForActiveShards(),
request.ackTimeout(),
shardsAcknowledged -> {
if (shardsAcknowledged == false) {
logger.debug(
"[{}] index created, but the operation timed out while waiting for " + "enough shards to be started.",
request.index()
);
}
listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcknowledged));
},
listener::onFailure
);
} else {
listener.onResponse(new CreateIndexClusterStateUpdateResponse(false, false));
}
}, listener::onFailure));
}
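/**
* Submits the cluster state update task that creates the index in the cluster state. Unlike {@link #createIndex},
* this does not wait for any shard copies to start; the response only reflects cluster state acknowledgement.
*/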
private void onlyCreateIndex(
final CreateIndexClusterStateUpdateRequest request,
final ActionListener<ClusterStateUpdateResponse> listener
) {
normalizeRequestSetting(request);
clusterService.submitStateUpdateTask(
"create-index [" + request.index() + "], cause [" + request.cause() + "]",
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged);
}
@Override
public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() {
return createIndexTaskKey;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return applyCreateIndexRequest(currentState, request, false);
}
@Override
public void onFailure(String source, Exception e) {
if (e instanceof ResourceAlreadyExistsException) {
logger.trace(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
} else {
logger.debug(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
}
super.onFailure(source, e);
}
}
);
}
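/**
* Normalizes the settings on the request so that they all carry the {@code index.} prefix, validates them against
* the index-scoped settings, and writes the normalized settings back onto the request.
*/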
private void normalizeRequestSetting(CreateIndexClusterStateUpdateRequest createIndexClusterStateRequest) {
Settings.Builder updatedSettingsBuilder = Settings.builder();
Settings build = updatedSettingsBuilder.put(createIndexClusterStateRequest.settings())
.normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX)
.build();
indexScopedSettings.validate(build, true);
createIndexClusterStateRequest.settings(build);
}
/**
* Handles the cluster state transition to a version that reflects the {@link CreateIndexClusterStateUpdateRequest}.
* All the requested changes are firstly validated before mutating the {@link ClusterState}.
*/
public ClusterState applyCreateIndexRequest(
ClusterState currentState,
CreateIndexClusterStateUpdateRequest request,
boolean silent,
BiConsumer<Metadata.Builder, IndexMetadata> metadataTransformer
) throws Exception {
normalizeRequestSetting(request);
logger.trace("executing IndexCreationTask for [{}] against cluster state version [{}]", request, currentState.version());
validate(request, currentState);
final Index recoverFromIndex = request.recoverFrom();
final IndexMetadata sourceMetadata = recoverFromIndex == null ? null : currentState.metadata().getIndexSafe(recoverFromIndex);
if (sourceMetadata != null) {
// If source metadata was provided, it means we're recovering from an existing index,
// in which case templates don't apply, so create the index from the source metadata
return applyCreateIndexRequestWithExistingMetadata(currentState, request, silent, sourceMetadata, metadataTransformer);
} else {
// Hidden indices apply templates slightly differently (ignoring wildcard '*'
// templates), so we need to check to see if the request is creating a hidden index
// prior to resolving which templates it matches
final Boolean isHiddenFromRequest = IndexMetadata.INDEX_HIDDEN_SETTING.exists(request.settings())
? IndexMetadata.INDEX_HIDDEN_SETTING.get(request.settings())
: null;
// The backing index may have a different name or prefix than the data stream name.
final String name = request.dataStreamName() != null ? request.dataStreamName() : request.index();
// Check to see if a v2 template matched
final String v2Template = MetadataIndexTemplateService.findV2Template(
currentState.metadata(),
name,
isHiddenFromRequest == null ? false : isHiddenFromRequest
);
if (v2Template != null) {
// If a v2 template was found, it takes precedence over all v1 templates, so create
// the index using that template and the request's specified settings
return applyCreateIndexRequestWithV2Template(currentState, request, silent, v2Template, metadataTransformer);
} else {
// A v2 template wasn't found, so check the v1 templates; even if no templates are
// found, creation still works using the request's specified index settings
final List<IndexTemplateMetadata> v1Templates = MetadataIndexTemplateService.findV1Templates(
currentState.metadata(),
request.index(),
isHiddenFromRequest
);
if (v1Templates.size() > 1) {
DEPRECATION_LOGGER.deprecate(
"index_template_multiple_match",
"index [{}] matches multiple legacy templates [{}], composable templates will only match a single template",
request.index(),
v1Templates.stream().map(IndexTemplateMetadata::name).sorted().collect(Collectors.joining(", "))
);
}
return applyCreateIndexRequestWithV1Templates(currentState, request, silent, v1Templates, metadataTransformer);
}
}
}
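/**
* Convenience overload of {@link #applyCreateIndexRequest(ClusterState, CreateIndexClusterStateUpdateRequest, boolean, BiConsumer)}
* that applies no additional metadata transformation.
*/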
public ClusterState applyCreateIndexRequest(ClusterState currentState, CreateIndexClusterStateUpdateRequest request, boolean silent)
throws Exception {
return applyCreateIndexRequest(currentState, request, silent, null);
}
/**
* Given the state and a request as well as the metadata necessary to build a new index,
* validate the configuration with an actual index service and return a new cluster state with
* the index added (and rerouted)
* @param currentState the current state to base the new state off of
* @param request the create index request
* @param silent a boolean for whether logging should be at a lower or higher level
* @param sourceMetadata when recovering from an existing index, metadata that should be copied to the new index
* @param temporaryIndexMeta metadata for the new index built from templates, source metadata, and request settings
* @param mappings a list of all mapping definitions to apply, in order
* @param aliasSupplier a function that takes the real {@link IndexService} and returns a list of {@link AliasMetadata} aliases
* @param templatesApplied a list of the names of the templates applied, for logging
* @param metadataTransformer if provided, a function that may alter cluster metadata in the same cluster state update that
* creates the index
* @return a new cluster state with the index added
*/
private ClusterState applyCreateIndexWithTemporaryService(
final ClusterState currentState,
final CreateIndexClusterStateUpdateRequest request,
final boolean silent,
final IndexMetadata sourceMetadata,
final IndexMetadata temporaryIndexMeta,
final List