/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.flush;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
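
/**
 * Coordinates synced flush across the copies of a shard: a best-effort operation that flushes
 * each copy and, when the copies are identical, marks them with a shared sync id. The protocol
 * is described in detail in the comment above {@link #attemptSyncedFlush(ShardId, ActionListener)}.
 */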
public class SyncedFlushService extends AbstractComponent {
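    // Internal transport actions for the three steps of the protocol: pre-flush (collect commit
    // ids), sync (write the sync id), and the in-flight operations check on the primary.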
    private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre";
    private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync";
    private static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight";
    private final IndicesService indicesService;
    private final ClusterService clusterService;
    private final TransportService transportService;
    private final IndexNameExpressionResolver indexNameExpressionResolver;
    @Inject
    public SyncedFlushService(Settings settings, IndicesService indicesService, ClusterService clusterService,
                              TransportService transportService, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings);
        this.indicesService = indicesService;
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.indexNameExpressionResolver = indexNameExpressionResolver;
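        // One transport handler per protocol step. The two flush steps run on the FLUSH thread
        // pool; the in-flight operation count is cheap and runs on the calling thread (SAME).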
        transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreShardSyncedFlushRequest.class, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler());
        transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest.class, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler());
        transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest.class, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler());
        indicesService.indicesLifecycle().addListener(new IndicesLifecycle.Listener() {
            @Override
            public void onShardInactive(final IndexShard indexShard) {
                // we only want to call sync flush once, so only trigger it when we are on a primary
                if (indexShard.routingEntry().primary()) {
                    attemptSyncedFlush(indexShard.shardId(), new ActionListener<ShardsSyncedFlushResult>() {
                        @Override
                        public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                            logger.trace("{} sync flush on inactive shard returned successfully for sync_id: {}", syncedFlushResult.getShardId(), syncedFlushResult.syncId());
                        }

                        @Override
                        public void onFailure(Throwable e) {
                            logger.debug("{} sync flush on inactive shard failed", e, indexShard.shardId());
                        }
                    });
                }
            }
        });
    }

    /**
     * A utility method to perform a synced flush for all shards of multiple indices. See
     * {@link #attemptSyncedFlush(ShardId, ActionListener)} for more details.
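     * <p>
     * A minimal usage sketch (the index name {@code "my-index"} and the {@code listener}
     * variable are illustrative):
     * <pre>
     * syncedFlushService.attemptSyncedFlush(new String[]{"my-index"},
     *         IndicesOptions.lenientExpandOpen(), listener);
     * </pre>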
     */
    public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) {
        final ClusterState state = clusterService.state();
        final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
        final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
        int numberOfShards = 0;
        for (String index : concreteIndices) {
            final IndexMetaData indexMetaData = state.metaData().index(index);
            numberOfShards += indexMetaData.getNumberOfShards();
            results.put(index, Collections.synchronizedList(new ArrayList<ShardsSyncedFlushResult>()));
        }
        if (numberOfShards == 0) {
            listener.onResponse(new SyncedFlushResponse(results));
            return;
        }
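        // Count down once per shard (across all indices); whichever response arrives last
        // fires the listener with the accumulated results.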
        final CountDown countDown = new CountDown(numberOfShards);
        for (final String index : concreteIndices) {
            final IndexMetaData indexMetaData = state.metaData().index(index);
            for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
                final ShardId shardId = new ShardId(index, shard);
                innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {
                    @Override
                    public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                        results.get(index).add(syncedFlushResult);
                        if (countDown.countDown()) {
                            listener.onResponse(new SyncedFlushResponse(results));
                        }
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        logger.debug("{} unexpected error while executing synced flush", e, shardId);
                        final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
                        results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
                        if (countDown.countDown()) {
                            listener.onResponse(new SyncedFlushResponse(results));
                        }
                    }
                });
            }
        }
    }

    /*
     * Tries to flush all copies of a shard and write a sync id to it.
     * After a synced flush, two shard copies may only contain the same sync id if they contain the same documents.
     * To ensure this, synced flush works in three steps:
     * 1. Flush all shard copies and gather the commit ids for each copy after the flush
     * 2. Ensure that there are no ongoing indexing operations on the primary
     * 3. Perform an additional flush on each shard copy that writes the sync id
     *
     * Step 3 is only executed on a shard if
     * a) the shard has no uncommitted changes since the last flush
     * b) the last flush was the one executed in step 1 (use the collected commit id to verify this)
     *
     * This alone is not enough to ensure that all copies contain the same documents. Without step 2, a sync id
     * would be written for inconsistent copies in the following scenario:
     *
     * A write operation has completed on the primary and is being sent to the replicas. The write request does
     * not reach the replicas until the synced flush is finished.
     * Step 1 is executed. After the flush, the commit point on the primary contains a write operation that the
     * replica does not have.
     * Step 3 will be executed on both the primary and the replica, because there are no uncommitted changes on
     * the primary (the first flush committed them) and there are no uncommitted changes on the replica (the
     * write operation has not reached the replica yet).
     *
     * Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary.
     * Together with the conditions for step 3 (same commit id and no uncommitted changes), this guarantees that a
     * sync id will only be written on the primary if no write operation was executed between step 1 and step 3,
     * and a sync id will only be written on a replica if it contains the same changes that the primary contains.
     *
     * Synced flush is a best-effort operation. The sync id may be written on all, some, or none of the copies.
     */
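    // In code terms, the step 3 precondition on each copy has roughly this shape (illustrative
    // pseudocode only, not the exact implementation):
    //   if (currentCommitId.equals(commitIdFromStep1) && hasUncommittedChanges == false) { writeSyncId(); }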
    public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) {
        innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener);
    }

    private void innerAttemptSyncedFlush(final ShardId shardId, final ClusterState state, final ActionListener<ShardsSyncedFlushResult> actionListener) {
        try {
            final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
            final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
            final int totalShards = shardRoutingTable.getSize();
            if (activeShards.size() == 0) {
                actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards"));
                return;
            }
            final ActionListener