org.elasticsearch.indices.flush.SyncedFlushService Maven / Gradle / Ivy

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.indices.flush;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexShardMissingException;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

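/**
 * Coordinates synced flushes: a best-effort, three-step flush that writes a shared sync id to all copies of a shard,
 * so that copies carrying the same sync id are known to contain the same documents.
 * See {@link #attemptSyncedFlush(ShardId, ActionListener)} for a description of the three steps.
 *
 * A minimal usage sketch (assumes an injected {@code SyncedFlushService} instance; the index name and listener body
 * are illustrative only):
 * <pre>
 * syncedFlushService.attemptSyncedFlush(new ShardId("my_index", 0), new ActionListener&lt;ShardsSyncedFlushResult&gt;() {
 *     public void onResponse(ShardsSyncedFlushResult result) {
 *         // inspect result.syncId() and the per-copy responses
 *     }
 *
 *     public void onFailure(Throwable t) {
 *         // the synced flush could not be executed
 *     }
 * });
 * </pre>
 */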
public class SyncedFlushService extends AbstractComponent {

    public static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre";
    public static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync";
    public static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight";

    private final IndicesService indicesService;
    private final ClusterService clusterService;
    private final TransportService transportService;
    private final ThreadPool threadPool;

    @Inject
    public SyncedFlushService(Settings settings, IndicesService indicesService, ClusterService clusterService, TransportService transportService, ThreadPool threadPool) {
        super(settings);
        this.indicesService = indicesService;
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.threadPool = threadPool;

        transportService.registerHandler(PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushTransportHandler());
        transportService.registerHandler(SYNCED_FLUSH_ACTION_NAME, new SyncedFlushTransportHandler());
        transportService.registerHandler(IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpCountTransportHandler());
        indicesService.indicesLifecycle().addListener(new IndicesLifecycle.Listener() {
            @Override
            public void onShardInactive(final IndexShard indexShard) {
                // we only want to call sync flush once, so only trigger it when we are on a primary
                if (indexShard.routingEntry().primary()) {
                    attemptSyncedFlush(indexShard.shardId(), new ActionListener<ShardsSyncedFlushResult>() {
                        @Override
                        public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                            logger.trace("{} sync flush on inactive shard returned successfully for sync_id: {}", syncedFlushResult.getShardId(), syncedFlushResult.syncId());
                        }

                        @Override
                        public void onFailure(Throwable e) {
                            logger.debug("{} sync flush on inactive shard failed", e, indexShard.shardId());
                        }
                    });
                }
            }
        });
    }

    /**
     * A utility method to perform a synced flush for all shards of multiple indices.
     * See {@link #attemptSyncedFlush(ShardId, ActionListener)} for more details.
     */
    public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<IndicesSyncedFlushResult> listener) {
        final ClusterState state = clusterService.state();
        final String[] concreteIndices = state.metaData().concreteIndices(indicesOptions, aliasesOrIndices);
        final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
        int totalNumberOfShards = 0;
        int numberOfShards = 0;
        for (String index : concreteIndices) {
            final IndexMetaData indexMetaData = state.metaData().index(index);
            totalNumberOfShards += indexMetaData.totalNumberOfShards();
            numberOfShards += indexMetaData.getNumberOfShards();
            results.put(index, Collections.synchronizedList(new ArrayList<ShardsSyncedFlushResult>()));
        }
        if (numberOfShards == 0) {
            listener.onResponse(new IndicesSyncedFlushResult(results));
            return;
        }
        final int finalTotalNumberOfShards = totalNumberOfShards;
        final CountDown countDown = new CountDown(numberOfShards);

        for (final String index : concreteIndices) {
            final int indexNumberOfShards = state.metaData().index(index).getNumberOfShards();
            for (int shard = 0; shard < indexNumberOfShards; shard++) {
                final ShardId shardId = new ShardId(index, shard);
                attemptSyncedFlush(shardId, new ActionListener<ShardsSyncedFlushResult>() {
                    @Override
                    public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                        results.get(index).add(syncedFlushResult);
                        if (countDown.countDown()) {
                            listener.onResponse(new IndicesSyncedFlushResult(results));
                        }
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        logger.debug("{} unexpected error while executing synced flush", e, shardId);
                        results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage()));
                        if (countDown.countDown()) {
                            listener.onResponse(new IndicesSyncedFlushResult(results));
                        }
                    }
                });
            }
        }
    }

    /**
     * Tries to flush all copies of a shard and write a sync id to them.
     * After a synced flush, two shard copies may only contain the same sync id if they contain the same documents.
     * To ensure this, synced flush works in three steps:
     * 1. Flush all shard copies and gather the commit ids for each copy after the flush
     * 2. Ensure that there are no ongoing indexing operations on the primary
     * 3. Perform an additional flush on each shard copy that writes the sync id
     *
     * Step 3 is only executed on a shard if
     * a) the shard has no uncommitted changes since the last flush
     * b) the last flush was the one executed in step 1 (the collected commit id is used to verify this)
     *
     * This alone is not enough to ensure that all copies contain the same documents. Without step 2, a sync id would be written for inconsistent copies in the following scenario:
     *
     * A write operation has completed on the primary and is being sent to the replicas. The write request does not reach the replicas until the synced flush is finished.
     * Step 1 is executed. After the flush, the commit point on the primary contains a write operation that the replica does not have.
     * Step 3 will be executed on the primary and the replica as well, because there are no uncommitted changes on the primary (the first flush committed them) and there are no uncommitted
     * changes on the replica (the write operation has not reached the replica yet).
     *
     * Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary.
     * Together with the conditions for step 3 (same commit id and no uncommitted changes) this guarantees that a sync id will only
     * be written on the primary if no write operation was executed between step 1 and step 3, and that a sync id will only be written on
     * a replica if it contains the same changes that the primary contains.
     *
     * Synced flush is a best-effort operation. The sync id may be written on all, some, or none of the copies.
     */
    public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) {
        try {
            final ClusterState state = clusterService.state();
            final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
            final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
            final int totalShards = shardRoutingTable.getSize();

            if (activeShards.size() == 0) {
                actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards"));
                return;
            }

            final ActionListener<Map<String, Engine.CommitId>> commitIdsListener = new ActionListener<Map<String, Engine.CommitId>>() {
                @Override
                public void onResponse(final Map<String, Engine.CommitId> commitIds) {
                    if (commitIds.isEmpty()) {
                        actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "all shards failed to commit on pre-sync"));
                        return;
                    }
                    final ActionListener<InFlightOpsResponse> inflightOpsListener = new ActionListener<InFlightOpsResponse>() {
                        @Override
                        public void onResponse(InFlightOpsResponse response) {
                            final int inflight = response.opCount();
                            assert inflight >= 0;
                            if (inflight != 0) {
                                actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "[" + inflight + "] ongoing operations on primary"));
                            } else {
                                // 3. now send the sync request to all the shards
                                String syncId = Strings.base64UUID();
                                sendSyncRequests(syncId, activeShards, state, commitIds, shardId, totalShards, actionListener);
                            }
                        }

                        @Override
                        public void onFailure(Throwable e) {
                            actionListener.onFailure(e);
                        }
                    };
                    // 2. fetch in flight operations
                    getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsListener);
                }

                @Override
                public void onFailure(Throwable e) {
                    actionListener.onFailure(e);
                }
            };

            // 1. send pre-sync flushes to all replicas
            sendPreSyncRequests(activeShards, state, shardId, commitIdsListener);
        } catch (Throwable t) {
            actionListener.onFailure(t);
        }
    }

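    /**
     * Resolves the routing table entry for the given shard, throwing the appropriate exception if the index is
     * closed, missing, or has no such shard.
     */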
    final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) {
        final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.index().name());
        if (indexRoutingTable == null) {
            IndexMetaData index = state.getMetaData().index(shardId.index().getName());
            if (index != null && index.state() == IndexMetaData.State.CLOSE) {
                throw new IndexClosedException(shardId.index());
            }
            throw new IndexMissingException(shardId.index());
        }
        final IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.id());
        if (shardRoutingTable == null) {
            throw new IndexShardMissingException(shardId);
        }
        return shardRoutingTable;
    }

    /**
     * Retrieves (via the listener) the number of in-flight indexing operations on the primary, or -1 if the count
     * could not be obtained.
     */
    protected void getInflightOpsCount(final ShardId shardId, ClusterState state, IndexShardRoutingTable shardRoutingTable, final ActionListener<InFlightOpsResponse> listener) {
        try {
            final ShardRouting primaryShard = shardRoutingTable.primaryShard();
            final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId());
            if (primaryNode == null) {
                logger.trace("{} failed to resolve node for primary shard {}, skipping sync", shardId, primaryShard);
                listener.onResponse(new InFlightOpsResponse(-1));
                return;
            }
            if (primaryNode.version().before(Version.V_1_6_0)) {
                logger.trace("{} primary shard {} is located on a node that doesn't support sync commits, skipping sync", shardId, primaryShard);
                listener.onResponse(new InFlightOpsResponse(-1));
                return;
            }
            logger.trace("{} retrieving in flight operation count", shardId);
            if (state.nodes().localNode().id().equals(primaryNode.id())) { // this is fast, we can run it on this thread
                listener.onResponse(performInFlightOps(new InFlightOpsRequest(shardId)));
            } else {
                transportService.sendRequest(primaryNode, IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpsRequest(shardId),
                        new BaseTransportResponseHandler<InFlightOpsResponse>() {
                            @Override
                            public InFlightOpsResponse newInstance() {
                                return new InFlightOpsResponse();
                            }

                            @Override
                            public void handleResponse(InFlightOpsResponse response) {
                                listener.onResponse(response);
                            }

                            @Override
                            public void handleException(TransportException exp) {
                                logger.debug("{} unexpected error while retrieving inflight op count", exp, shardId);
                                listener.onFailure(exp);
                            }

                            @Override
                            public String executor() {
                                return ThreadPool.Names.SAME;
                            }
                        });
            }
        } catch (Throwable t) {
            listener.onFailure(t);
        }
    }


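    /**
     * Step 3: sends the synced flush request, carrying the sync id and the commit id gathered in the pre-sync flush,
     * to every given shard copy and collects the per-copy responses.
     */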
    void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds,
                          final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) {
        final CountDown countDown = new CountDown(shards.size());
        final Map<ShardRouting, SyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
        for (final ShardRouting shard : shards) {
            final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
            if (node == null) {
                logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
                results.put(shard, new SyncedFlushResponse("unknown node"));
                contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                continue;
            }
            final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
            if (expectedCommitId == null) {
                logger.trace("{} can't resolve expected commit id for node {}, skipping for sync id [{}]. shard routing {}", shardId, shard.currentNodeId(), syncId, shard);
                results.put(shard, new SyncedFlushResponse("no commit id from pre-sync flush"));
                contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                continue;
            }
            assert node.version().onOrAfter(Version.V_1_6_0) : " node with version " + node.version() + " can not have an expected commit ID";
            logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
            final ActionListener<SyncedFlushResponse> currentShardListener = new ActionListener<SyncedFlushResponse>() {
                @Override
                public void onResponse(SyncedFlushResponse response) {
                    SyncedFlushResponse existing = results.put(shard, response);
                    assert existing == null : "got two answers for node [" + node + "]";
                    // count after the assert so we won't decrement twice in handleException
                    contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                }

                @Override
                public void onFailure(Throwable e) {
                    logger.trace("{} error while performing synced flush on [{}], skipping", e, shardId, shard);
                    results.put(shard, new SyncedFlushResponse(e.getMessage()));
                    contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                }
            };
            if (state.nodes().localNode().id().equals(node.id())) {
                execute(ThreadPool.Names.FLUSH, new Callable<SyncedFlushResponse>() {
                    @Override
                    public SyncedFlushResponse call() throws Exception {
                        return performSyncedFlush(new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId));
                    }
                }, currentShardListener);
            } else {
                transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId),
                        new BaseTransportResponseHandler<SyncedFlushResponse>() {
                            @Override
                            public SyncedFlushResponse newInstance() {
                                return new SyncedFlushResponse();
                            }

                            @Override
                            public void handleResponse(SyncedFlushResponse response) {
                                currentShardListener.onResponse(response);
                            }

                            @Override
                            public void handleException(TransportException exp) {
                                currentShardListener.onFailure(exp);
                            }

                            @Override
                            public String executor() {
                                return ThreadPool.Names.SAME;
                            }
                        });
            }
        }
    }

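    /**
     * Counts down once per shard copy and, once all copies have reported, notifies the listener with the combined
     * result.
     */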
    private void contDownAndSendResponseIfDone(String syncId, List<ShardRouting> shards, ShardId shardId, int totalShards,
                                               ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, SyncedFlushResponse> results) {
        if (countDown.countDown()) {
            assert results.size() == shards.size();
            listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results));
        }
    }

    /**
     * Sends pre-sync flush requests (step 1) to all started copies of the given shard.
     */
    void sendPreSyncRequests(final List<ShardRouting> shards, final ClusterState state, final ShardId shardId, final ActionListener<Map<String, Engine.CommitId>> listener) {
        final CountDown countDown = new CountDown(shards.size());
        final ConcurrentMap<String, Engine.CommitId> commitIds = ConcurrentCollections.newConcurrentMap();
        for (final ShardRouting shard : shards) {
            logger.trace("{} sending pre-synced flush request to {}", shardId, shard);
            final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
            if (node == null) {
                logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard);
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
                continue;
            }
            if (node.version().before(Version.V_1_6_0)) {
                logger.trace("{} shard routing {} refers to a node that doesn't support sync commits. skipping.", shardId, shard);
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
                continue;
            }
            final ActionListener<PreSyncedFlushResponse> currentShardListener = new ActionListener<PreSyncedFlushResponse>() {
                @Override
                public void onResponse(PreSyncedFlushResponse response) {
                    Engine.CommitId existing = commitIds.putIfAbsent(node.id(), response.commitId());
                    assert existing == null : "got two answers for node [" + node + "]";
                    // count after the assert so we won't decrement twice in handleException
                    if (countDown.countDown()) {
                        listener.onResponse(commitIds);
                    }
                }

                @Override
                public void onFailure(Throwable e) {
                    logger.trace("{} error while performing pre synced flush on [{}], skipping", e, shardId, shard);
                    if (countDown.countDown()) {
                        listener.onResponse(commitIds);
                    }
                }
            };
            if (state.nodes().localNode().id().equals(node.id())) {
                execute(ThreadPool.Names.FLUSH, new Callable<PreSyncedFlushResponse>() {
                    @Override
                    public PreSyncedFlushResponse call() throws Exception {
                        return performPreSyncedFlush(new PreSyncedFlushRequest(shard.shardId()));
                    }
                }, currentShardListener);
            } else {
                transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() {
                    @Override
                    public PreSyncedFlushResponse newInstance() {
                        return new PreSyncedFlushResponse();
                    }

                    @Override
                    public void handleResponse(PreSyncedFlushResponse response) {
                        currentShardListener.onResponse(response);
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        currentShardListener.onFailure(exp);
                    }

                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                });
            }
        }
    }

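    /**
     * Step 1, executed on the node hosting a shard copy: performs a regular flush and returns the resulting commit id.
     */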
    private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
        FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
        logger.trace("{} performing pre sync flush", request.shardId());
        Engine.CommitId commitId = indexShard.flush(flushRequest);
        logger.trace("{} pre sync flush done. commit id {}", request.shardId(), commitId);
        return new PreSyncedFlushResponse(commitId);
    }

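    /**
     * Step 3, executed on the node hosting a shard copy: attempts to write the sync id, which succeeds only if the
     * commit is unchanged since the pre-sync flush and no operations are pending.
     */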
    private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
        logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId());
        Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
        logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
        switch (result) {
            case SUCCESS:
                return new SyncedFlushResponse();
            case COMMIT_MISMATCH:
                return new SyncedFlushResponse("commit has changed");
            case PENDING_OPERATIONS:
                return new SyncedFlushResponse("pending operations");
            default:
                throw new ElasticsearchException("unknown synced flush result [" + result + "]");
        }
    }

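    /**
     * Step 2, executed on the node hosting the primary: samples the number of indexing operations currently in flight.
     */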
    private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
        if (indexShard.routingEntry().primary() == false) {
            throw new IndexShardException(request.shardId(), "expected a primary shard");
        }
        int opCount = indexShard.getOperationsCount();
        logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
        return new InFlightOpsResponse(opCount);
    }

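    /**
     * Request for the first step of synced flush (pre-sync flush) for one shard copy
     */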
    final static class PreSyncedFlushRequest extends TransportRequest {
        private ShardId shardId;

        PreSyncedFlushRequest() {
        }

        public PreSyncedFlushRequest(ShardId shardId) {
            this.shardId = shardId;
        }

        @Override
        public String toString() {
            return "PreSyncedFlushRequest{" +
                    "shardId=" + shardId +
                    '}';
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            shardId.writeTo(out);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            this.shardId = ShardId.readShardId(in);
        }

        public ShardId shardId() {
            return shardId;
        }
    }

    /**
     * Response for first step of synced flush (flush) for one shard copy
     */
    final static class PreSyncedFlushResponse extends TransportResponse {

        Engine.CommitId commitId;

        PreSyncedFlushResponse() {
        }

        PreSyncedFlushResponse(Engine.CommitId commitId) {
            if (commitId == null) {
                throw new IllegalArgumentException("CommitID must not be null");
            }
            this.commitId = commitId;
        }

        public Engine.CommitId commitId() {
            return commitId;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            commitId = new Engine.CommitId(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            commitId.writeTo(out);
        }
    }

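    /**
     * Request for the third step of synced flush (writing the sync id) for one shard copy
     */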
    static final class SyncedFlushRequest extends TransportRequest {

        private String syncId;
        private Engine.CommitId expectedCommitId;
        private ShardId shardId;

        public SyncedFlushRequest() {
        }

        public SyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) {
            this.expectedCommitId = expectedCommitId;
            this.shardId = shardId;
            this.syncId = syncId;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            shardId = ShardId.readShardId(in);
            expectedCommitId = new Engine.CommitId(in);
            syncId = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            shardId.writeTo(out);
            expectedCommitId.writeTo(out);
            out.writeString(syncId);
        }

        public ShardId shardId() {
            return shardId;
        }

        public String syncId() {
            return syncId;
        }

        public Engine.CommitId expectedCommitId() {
            return expectedCommitId;
        }

        @Override
        public String toString() {
            return "SyncedFlushRequest{" +
                    "shardId=" + shardId +
                    ",syncId='" + syncId + '\'' +
                    '}';
        }
    }

    /**
     * Response for third step of synced flush (writing the sync id) for one shard copy
     */
    public static final class SyncedFlushResponse extends TransportResponse {

        /**
         * a non null value indicates a failure to sync flush. null means success
         */
        String failureReason;

        public SyncedFlushResponse() {
            failureReason = null;
        }

        public SyncedFlushResponse(String failureReason) {
            this.failureReason = failureReason;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            failureReason = in.readOptionalString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeOptionalString(failureReason);
        }

        public boolean success() {
            return failureReason == null;
        }

        public String failureReason() {
            return failureReason;
        }

        @Override
        public String toString() {
            return "SyncedFlushResponse{" +
                    "success=" + success() +
                    ", failureReason='" + failureReason + '\'' +
                    '}';
        }
    }


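    /**
     * Request for the second step of synced flush (check operations in flight) sent to the primary
     */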
    static final class InFlightOpsRequest extends TransportRequest {

        private ShardId shardId;

        public InFlightOpsRequest() {
        }

        public InFlightOpsRequest(ShardId shardId) {
            this.shardId = shardId;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            shardId = ShardId.readShardId(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            shardId.writeTo(out);
        }

        public ShardId shardId() {
            return shardId;
        }

        @Override
        public String toString() {
            return "InFlightOpsRequest{" +
                    "shardId=" + shardId +
                    '}';
        }
    }

    /**
     * Response for second step of synced flush (check operations in flight)
     */
    static final class InFlightOpsResponse extends TransportResponse {

        int opCount;

        public InFlightOpsResponse() {
        }

        public InFlightOpsResponse(int opCount) {
            this.opCount = opCount;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            opCount = in.readVInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeVInt(opCount);
        }

        public int opCount() {
            return opCount;
        }

        @Override
        public String toString() {
            return "InFlightOpsResponse{" +
                    "opCount=" + opCount +
                    '}';
        }
    }

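    /**
     * Transport handler for {@link #PRE_SYNCED_FLUSH_ACTION_NAME}: runs the pre-sync flush (step 1) on the node that
     * hosts the shard copy.
     */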
    private final class PreSyncedFlushTransportHandler extends BaseTransportRequestHandler<PreSyncedFlushRequest> {

        @Override
        public PreSyncedFlushRequest newInstance() {
            return new PreSyncedFlushRequest();
        }

        @Override
        public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception {
            channel.sendResponse(performPreSyncedFlush(request));
        }

        @Override
        public String executor() {
            return ThreadPool.Names.FLUSH;
        }

        @Override
        public boolean isForceExecution() {
            return false;
        }
    }

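    /**
     * Transport handler for {@link #SYNCED_FLUSH_ACTION_NAME}: writes the sync id (step 3) on the node that hosts
     * the shard copy.
     */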
    private final class SyncedFlushTransportHandler extends BaseTransportRequestHandler<SyncedFlushRequest> {

        @Override
        public SyncedFlushRequest newInstance() {
            return new SyncedFlushRequest();
        }

        @Override
        public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception {
            channel.sendResponse(performSyncedFlush(request));
        }

        @Override
        public String executor() {
            return ThreadPool.Names.FLUSH;
        }

        @Override
        public boolean isForceExecution() {
            return false;
        }
    }

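    /**
     * Transport handler for {@link #IN_FLIGHT_OPS_ACTION_NAME}: samples the in-flight operation count (step 2) on
     * the node that hosts the primary.
     */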
    private final class InFlightOpCountTransportHandler extends BaseTransportRequestHandler<InFlightOpsRequest> {

        @Override
        public InFlightOpsRequest newInstance() {
            return new InFlightOpsRequest();
        }

        @Override
        public void messageReceived(InFlightOpsRequest request, TransportChannel channel) throws Exception {
            channel.sendResponse(performInFlightOps(request));
        }

        @Override
        public String executor() {
            return ThreadPool.Names.SAME;
        }

        @Override
        public boolean isForceExecution() {
            return false;
        }
    }

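    /**
     * Runs the given callable on the named thread pool and reports its result, or any thrown error, to the listener
     * exactly once.
     */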
    private <T> void execute(String pool, final Callable<T> callable, final ActionListener<T> listener) {
        try {
            threadPool.executor(pool).execute(new Runnable() {
                @Override
                public void run() {
                    // Listeners typically do counting on errors and successes, and the decision to move to second phase, etc. is based on
                    // these counts so we need to be careful here to never propagate exceptions thrown by onResult to onFailure
                    T result = null;
                    Throwable error = null;
                    try {
                        result = callable.call();
                    } catch (Throwable t) {
                        error = t;
                    } finally {
                        if (result == null) {
                            assert error != null;
                            listener.onFailure(error);
                        } else {
                            assert error == null : error;
                            listener.onResponse(result);
                        }
                    }
                }
            });
        } catch (Throwable t) {
            listener.onFailure(t);
        }
    }

}