
org.apache.flink.runtime.executiongraph.failover.SchedulingPipelinedRegionComputeUtil

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.executiongraph.failover;

import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup;
import org.apache.flink.runtime.scheduler.strategy.ConsumerVertexGroup;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;
import org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex;
import org.apache.flink.runtime.scheduler.strategy.SchedulingPipelinedRegion;
import org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import static org.apache.flink.runtime.executiongraph.VertexGroupComputeUtil.mergeVertexGroups;
import static org.apache.flink.runtime.executiongraph.VertexGroupComputeUtil.uniqueVertexGroups;
import static org.apache.flink.runtime.executiongraph.failover.PipelinedRegionComputeUtil.buildRawRegions;
import static org.apache.flink.util.Preconditions.checkState;

/** Utils for computing {@link SchedulingPipelinedRegion}s. */
public final class SchedulingPipelinedRegionComputeUtil {

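    /**
     * Computes the pipelined regions of the topology. Raw regions are first built by grouping
     * vertices that are connected through results which must be consumed in a pipelined manner;
     * regions that form a cycle through other (non-pipelined) edges are then merged, because
     * cyclically dependent regions cannot be scheduled one after another (see FLINK-17330).
     */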
    public static Set<Set<SchedulingExecutionVertex>> computePipelinedRegions(
            final Iterable<? extends SchedulingExecutionVertex> topologicallySortedVertices,
            final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex>
                    executionVertexRetriever,
            final Function<IntermediateResultPartitionID, ? extends SchedulingResultPartition>
                    resultPartitionRetriever) {

        final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion =
                buildRawRegions(
                        topologicallySortedVertices,
                        vertex ->
                                getMustBePipelinedConsumedResults(
                                        vertex, resultPartitionRetriever));

        return mergeRegionsOnCycles(vertexToRegion, executionVertexRetriever);
    }

    /**
     * Merge the regions based on <a
     * href="https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm">
     * Tarjan's strongly connected components algorithm</a>. For more details please see <a
     * href="https://issues.apache.org/jira/browse/FLINK-17330">FLINK-17330</a>.
     */
    private static Set<Set<SchedulingExecutionVertex>> mergeRegionsOnCycles(
            final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion,
            final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex>
                    executionVertexRetriever) {

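        // Deduplicate the vertex-to-region mapping into a list of distinct regions,
        // whose list indices serve as the node ids of the SCC computation below.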
        final List<Set<SchedulingExecutionVertex>> regionList =
                new ArrayList<>(uniqueVertexGroups(vertexToRegion));
        final List<List<Integer>> outEdges =
                buildOutEdgesDesc(vertexToRegion, regionList, executionVertexRetriever);
        final Set<Set<Integer>> sccs =
                StronglyConnectedComponentsComputeUtils.computeStronglyConnectedComponents(
                        outEdges.size(), outEdges);

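        // Merge all regions inside each strongly connected component into one region.
        // An identity-based set is used because the regions are mutable HashSets whose
        // content-based hashCode would be both costly and unstable while merging.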
        final Set<Set<SchedulingExecutionVertex>> mergedRegions =
                Collections.newSetFromMap(new IdentityHashMap<>());
        for (Set<Integer> scc : sccs) {
            checkState(scc.size() > 0);

            Set<SchedulingExecutionVertex> mergedRegion = new HashSet<>();
            for (int regionIndex : scc) {
                mergedRegion =
                        mergeVertexGroups(
                                mergedRegion, regionList.get(regionIndex), vertexToRegion);
            }
            mergedRegions.add(mergedRegion);
        }

        return mergedRegions;
    }

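    /**
     * Describes the out edges of the region graph: for the region at index {@code i} of {@code
     * regionList}, element {@code i} of the returned list contains the indices of the regions
     * that consume its non-pipelined results. This adjacency description is the input of the
     * strongly connected components computation.
     */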
    private static List<List<Integer>> buildOutEdgesDesc(
            final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion,
            final List<Set<SchedulingExecutionVertex>> regionList,
            final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex>
                    executionVertexRetriever) {

        final Map<Set<SchedulingExecutionVertex>, Integer> regionIndices = new IdentityHashMap<>();
        for (int i = 0; i < regionList.size(); i++) {
            regionIndices.put(regionList.get(i), i);
        }

        final List<List<Integer>> outEdges = new ArrayList<>(regionList.size());
        for (Set<SchedulingExecutionVertex> currentRegion : regionList) {
            final List<Integer> currentRegionOutEdges = new ArrayList<>();
            for (SchedulingExecutionVertex vertex : currentRegion) {
                for (SchedulingResultPartition producedResult : vertex.getProducedResults()) {
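                    // Results that must be pipelined-consumed are consumed inside the same raw
                    // region by construction, so they cannot form an edge between regions.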
                    if (producedResult.getResultType().mustBePipelinedConsumed()) {
                        continue;
                    }
                    for (ConsumerVertexGroup consumerVertexGroup :
                            producedResult.getConsumerVertexGroups()) {
                        for (ExecutionVertexID consumerVertexId : consumerVertexGroup) {
                            SchedulingExecutionVertex consumerVertex =
                                    executionVertexRetriever.apply(consumerVertexId);
                            // Skip the ConsumerVertexGroup if its vertices are outside current
                            // regions and cannot be merged
                            if (!vertexToRegion.containsKey(consumerVertex)) {
                                break;
                            }
                            if (!currentRegion.contains(consumerVertex)) {
                                currentRegionOutEdges.add(
                                        regionIndices.get(vertexToRegion.get(consumerVertex)));
                            }
                        }
                    }
                }
            }
            outEdges.add(currentRegionOutEdges);
        }

        return outEdges;
    }

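    /**
     * Collects the consumed results of the given vertex whose result type requires pipelined
     * consumption. These are the edges along which the raw pipelined regions are built. Since
     * all partitions in a {@link ConsumedPartitionGroup} share the same result type, inspecting
     * the first partition of a group is sufficient.
     */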
    private static Iterable<SchedulingResultPartition> getMustBePipelinedConsumedResults(
            SchedulingExecutionVertex vertex,
            Function<IntermediateResultPartitionID, ? extends SchedulingResultPartition>
                    resultPartitionRetriever) {
        List<SchedulingResultPartition> mustBePipelinedConsumedResults = new ArrayList<>();
        for (ConsumedPartitionGroup consumedPartitionGroup : vertex.getConsumedPartitionGroups()) {
            for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
                SchedulingResultPartition consumedResult =
                        resultPartitionRetriever.apply(partitionId);
                if (!consumedResult.getResultType().mustBePipelinedConsumed()) {
                    // The result types of partitions in one ConsumedPartitionGroup are all the same
                    break;
                }
                mustBePipelinedConsumedResults.add(consumedResult);
            }
        }
        return mustBePipelinedConsumedResults;
    }

    private SchedulingPipelinedRegionComputeUtil() {}
}