// org.apache.flink.runtime.io.network.partition.JobMasterPartitionTrackerImpl
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.io.network.partition;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.util.Preconditions;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
* Implementation of {@link JobMasterPartitionTracker}.
*/
public class JobMasterPartitionTrackerImpl implements JobMasterPartitionTracker {
final Map> trackedPartitionsPerResource = new ConcurrentHashMap<>();
final Map partitionInfos = new ConcurrentHashMap<>();
@Override
public void startTrackingPartition(ResourceID resourceID, ResultPartitionID resultPartitionID) {
Preconditions.checkNotNull(resourceID);
Preconditions.checkNotNull(resultPartitionID);
partitionInfos.put(resultPartitionID, new PartitionInfo(resourceID, false));
trackedPartitionsPerResource.compute(resourceID, (ignored, partitionIds) -> {
if (partitionIds == null) {
partitionIds = new HashSet<>();
}
partitionIds.add(resultPartitionID);
return partitionIds;
});
}
@Override
public void stopTrackingPartition(ResultPartitionID resultPartitionID) {
Preconditions.checkNotNull(resultPartitionID);
final PartitionInfo partitionInfo = partitionInfos.remove(resultPartitionID);
if (partitionInfo != null) {
trackedPartitionsPerResource.computeIfPresent(
partitionInfo.getResourceID(), (ignored, trackedResultPartitionIds) -> {
trackedResultPartitionIds.remove(resultPartitionID);
return trackedResultPartitionIds.isEmpty()
? null
: trackedResultPartitionIds;
});
}
}
@Override
public Collection getTrackedPartitions(ResourceID resourceID) {
Preconditions.checkNotNull(resourceID);
return trackedPartitionsPerResource.getOrDefault(resourceID, Collections.emptySet());
}
@Override
public boolean isPartitionFailed(ResultPartitionID resultPartitionID) {
Preconditions.checkNotNull(resultPartitionID);
PartitionInfo partitionInfo = partitionInfos.get(resultPartitionID);
if (partitionInfo != null && partitionInfo.isFailed()) {
return true;
}
return false;
}
@Override
public void markPartitionFailed(ResultPartitionID resultPartitionID) {
Preconditions.checkNotNull(resultPartitionID);
PartitionInfo partitionInfo = partitionInfos.get(resultPartitionID);
if (partitionInfo != null) {
partitionInfo.markFailed();
}
}
private static class PartitionInfo {
private final ResourceID resourceID;
private boolean isFailed;
PartitionInfo(ResourceID resourceID, boolean isFailed) {
this.resourceID = resourceID;
this.isFailed = isFailed;
}
public ResourceID getResourceID() {
return resourceID;
}
public boolean isFailed() {
return isFailed;
}
public void markFailed() {
this.isFailed = true;
}
}
}