/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Service responsible for submitting mapping changes
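 * <p>
 * A minimal, illustrative usage sketch (the variable names are assumptions, not a
 * verbatim call site from the codebase):
 * <pre>{@code
 * // best-effort re-sync of one index's mapping metadata in the cluster state
 * mappingService.refreshMapping("my-index", indexUUID);
 * // merge a mapping update on the master; the listener is notified once nodes ack
 * mappingService.putMapping(request, listener);
 * }</pre>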
*/
public class MetaDataMappingService extends AbstractComponent {
private final ClusterService clusterService;
private final IndicesService indicesService;
final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
@Inject
public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
}
static class RefreshTask {
final String index;
final String indexUUID;
RefreshTask(String index, final String indexUUID) {
this.index = index;
this.indexUUID = indexUUID;
}
@Override
public String toString() {
return "[" + index + "][" + indexUUID + "]";
}
}
class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
@Override
public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
ClusterState newClusterState = executeRefresh(currentState, tasks);
return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
}
}
/**
 * Batch method to apply all the queued refresh operations. The idea is to batch as much as
 * possible so that, for example, we do not recreate the same index repeatedly for several
 * updates of the same mapping, and so that a single cluster change event is generated for
 * all of them.
 */
ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
// break down to tasks per index, so we can optimize the on demand index service creation
// to only happen for the duration of a single index processing of its respective events
Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
for (RefreshTask task : allTasks) {
if (task.index == null) {
logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
continue;
}
tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task);
}
boolean dirty = false;
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
IndexMetaData indexMetaData = mdBuilder.get(entry.getKey());
if (indexMetaData == null) {
// index got deleted on us, ignore...
logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey());
continue;
}
final Index index = indexMetaData.getIndex();
// filter the queued tasks down to those that still match this index's UUID; tasks that
// refer to a stale UUID (the index was deleted and re-created) are ignored
List<RefreshTask> allIndexTasks = entry.getValue();
boolean hasTaskWithRightUUID = false;
for (RefreshTask task : allIndexTasks) {
if (indexMetaData.isSameUUID(task.indexUUID)) {
hasTaskWithRightUUID = true;
} else {
logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task);
}
}
if (hasTaskWithRightUUID == false) {
continue;
}
// construct the actual index if needed, and make sure the relevant mappings are there
boolean removeIndex = false;
IndexService indexService = indicesService.indexService(indexMetaData.getIndex());
if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData, Collections.emptyList());
removeIndex = true;
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
}
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
try {
boolean indexDirty = refreshIndexMapping(indexService, builder);
if (indexDirty) {
mdBuilder.put(builder);
dirty = true;
}
} finally {
if (removeIndex) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
if (!dirty) {
return currentState;
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
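// Side note: callers can detect a no-op batch by identity, since executeRefresh returns the
// very same ClusterState instance when nothing was dirty. An illustrative sketch (assumption:
// this mirrors how the cluster state update machinery skips publishing unchanged states):
//
//   ClusterState result = executeRefresh(currentState, tasks);
//   if (result == currentState) {
//       // nothing was dirty; no new cluster state needs to be published
//   }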
private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Builder builder) {
boolean dirty = false;
String index = indexService.index().getName();
try {
List<String> updatedTypes = new ArrayList<>();
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
final String type = mapper.type();
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
updatedTypes.add(type);
}
}
// if a single type is not up-to-date, re-send everything
if (updatedTypes.isEmpty() == false) {
logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
dirty = true;
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
builder.putMapping(new MappingMetaData(mapper));
}
}
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
}
return dirty;
}
/**
 * Refreshes mappings if they differ between the original and the parsed version
 */
public void refreshMapping(final String index, final String indexUUID) {
final RefreshTask refreshTask = new RefreshTask(index, indexUUID);
clusterService.submitStateUpdateTask("refresh-mapping",
refreshTask,
ClusterStateTaskConfig.build(Priority.HIGH),
refreshExecutor,
(source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
);
}
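// Hedged caller-side sketch (the indexService variable is an assumption; the UUID should
// come from the live index so that stale tasks can be dropped in executeRefresh):
//
//   metaDataMappingService.refreshMapping(indexService.index().getName(),
//                                         indexService.index().getUUID());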
class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
@Override
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
Map<Index, MapperService> indexMapperServices = new HashMap<>();
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
try {
for (PutMappingClusterStateUpdateRequest request : tasks) {
try {
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indexMapperServices.containsKey(indexMetaData.getIndex()) == false) {
MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
indexMapperServices.put(index, mapperService);
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
mapperService.merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
}
}
currentState = applyRequest(currentState, request, indexMapperServices);
builder.success(request);
} catch (Exception e) {
builder.failure(request, e);
}
}
return builder.build(currentState);
} finally {
IOUtils.close(indexMapperServices.values());
}
}
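// Note: failures are recorded per request via builder.failure(request, e), so one malformed
// mapping update in the batch does not fail the other queued updates.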
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
Map<Index, MapperService> indexMapperServices) throws IOException {
String mappingType = request.type();
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
final MetaData metaData = currentState.metaData();
final List<IndexMetaData> updateList = new ArrayList<>();
for (Index index : request.indices()) {
MapperService mapperService = indexMapperServices.get(index);
// IMPORTANT: always get the metadata from the state since it gets batched
// and if we pull it from the indexService we might miss an update etc.
final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index);
// this is paranoia... just to be sure we use the exact same metadata tuple on the update
// that we used for the validation; it makes this mechanism a little less scary
updateList.add(indexMetaData);
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = mapperService.documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
newMapper = mapperService.parse(request.type(), mappingUpdateSource, false);
} else {
newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null);
if (existingMapper != null) {
// first, simulate: just call merge and ignore the result
existingMapper.merge(newMapper.mapping(), request.updateAllTypes());
} else {
// TODO: can we find a better place for this validation?
// The reason this validation is here is that the mapper service doesn't learn about
// new types all at once, which can create a false error.
// For example, in MapperService we can't distinguish between a create-index API call
// and a put-mapping API call, so we don't know which types existed beforehand.
// Also the order of the mappings may be backwards.
if (newMapper.parentFieldMapper().active()) {
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
String parentType = newMapper.parentFieldMapper().type();
if (parentType.equals(mapping.value.type()) &&
mapperService.getParentTypes().contains(parentType) == false) {
throw new IllegalArgumentException("can't add a _parent field that points to an " +
"already existing type, that isn't already a parent");
}
}
}
}
}
if (mappingType == null) {
mappingType = newMapper.type();
} else if (mappingType.equals(newMapper.type()) == false) {
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
}
}
assert mappingType != null;
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]");
}
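// e.g. a put-mapping request for type "_foo" is rejected here, while the reserved
// "_default_" mapping is explicitly allowed through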
MetaData.Builder builder = MetaData.builder(metaData);
boolean updated = false;
for (IndexMetaData indexMetaData : updateList) {
// do the actual merge here on the master, and update the mapping source
// we use the exact same indexService and metadata we used to validate above here to actually apply the update
final Index index = indexMetaData.getIndex();
final MapperService mapperService = indexMapperServices.get(index);
CompressedXContent existingSource = null;
DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
if (existingMapper != null) {
existingSource = existingMapper.mappingSource();
}
DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
if (existingSource.equals(updatedSource)) {
// same source, no changes, ignore it
} else {
updated = true;
// use the merged mapping source
if (logger.isDebugEnabled()) {
logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("{} update_mapping [{}]", index, mergedMapper.type());
}
}
} else {
updated = true;
if (logger.isDebugEnabled()) {
logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("{} create_mapping [{}]", index, mappingType);
}
}
IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
// Mapping updates on a single type may have side-effects on other types so we need to
// update mapping metadata on all types
for (DocumentMapper mapper : mapperService.docMappers(true)) {
indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
}
builder.put(indexMetaDataBuilder);
}
if (updated) {
return ClusterState.builder(currentState).metaData(builder).build();
} else {
return currentState;
}
}
@Override
public String describeTasks(List<PutMappingClusterStateUpdateRequest> tasks) {
return tasks.stream().map(PutMappingClusterStateUpdateRequest::type).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
}
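// e.g. for queued requests with types ["tweet", "user"] this yields "tweet, user"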
}
public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
clusterService.submitStateUpdateTask("put-mapping",
request,
ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()),
putMappingExecutor,
new AckedClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Exception e) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
});
}
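// Hedged caller-side sketch; the setter chain is an assumption inferred from the getters
// used in this class (request.type(), request.source(), request.ackTimeout()):
//
//   PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest()
//       .type("my_type")
//       .source("{\"properties\": {\"title\": {\"type\": \"text\"}}}");
//   metaDataMappingService.putMapping(request, new ActionListener<ClusterStateUpdateResponse>() {
//       @Override
//       public void onResponse(ClusterStateUpdateResponse response) {
//           // response.isAcknowledged() is false when the ack timeout elapsed first
//       }
//       @Override
//       public void onFailure(Exception e) {
//           // surface the mapping/validation error to the original caller
//       }
//   });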
}