org.apache.flink.runtime.io.network.partition.DrainablePipelinedSubpartition

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.io.network.partition;

import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;
import java.io.IOException;
import java.util.Iterator;

import static org.apache.flink.util.Preconditions.checkState;

/**
 * A pipelined, in-memory-only subpartition that drains its data buffers when the consuming
 * view has failed, so the producer can keep writing until a consumer reconnects.
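 *
 * <p>A rough sketch of the suspend/reconnect cycle as it appears from this class alone; the
 * call order outside this class and the names {@code listener} and {@code newView} are
 * assumptions for illustration, not part of this file:
 * <pre>{@code
 * DrainablePipelinedSubpartition subpartition = ...;
 * BufferAvailabilityListener listener = ...;
 *
 * // a consumer attaches
 * PipelinedSubpartitionView view = subpartition.createReadView(listener);
 *
 * // the consumer fails or disconnects: drop queued data and start draining
 * subpartition.suspend(view);
 * // ...the producer keeps calling add(...); buffers are consumed and recycled, not queued...
 *
 * // a reconnecting consumer attaches a fresh view (assumed to be a new
 * // DrainablePipelinedSubpartitionView, e.g. from createReadView) and consumption resumes
 * subpartition.allowConsuming(newView);
 * }</pre>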
 */
class DrainablePipelinedSubpartition extends PipelinedSubpartition implements ReconnectableSubpartition {

	private static final Logger LOG = LoggerFactory.getLogger(DrainablePipelinedSubpartition.class);

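	// Last BufferConsumer added while draining that could not be fully consumed yet; its
	// remaining data is dropped by later flush() / allowConsuming() calls.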
	private BufferConsumer unfinishedBuffer;

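	// Reconnection state: starts INITIALIZED, suspend() switches to SUSPENDED (draining),
	// allowConsuming() switches to CONSUMING.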
	private State state = State.INITIALIZED;

	DrainablePipelinedSubpartition(int index, InternalResultPartition parent) {
		super(index, parent);
	}

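	// While SUSPENDED, added data is consumed and recycled immediately instead of being queued;
	// an unfinished BufferConsumer is kept so its remaining data can be dropped later.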
	@Override
	public boolean add(BufferConsumer bufferConsumer) throws IOException {
		synchronized (buffers) {
			if (state != State.SUSPENDED) {
				return super.add(bufferConsumer);
			}

			clearFinishedBuffer();
			if (unfinishedBuffer != null) {
				// "build" means consuming the buffer, updating the position inside BufferConsumer
				// "recycleBuffer" means drop the buffer directly since it's draining
				unfinishedBuffer.build().recycleBuffer();
				checkState(unfinishedBuffer.isFinished());
				unfinishedBuffer.close();
				unfinishedBuffer = null;
			}

			bufferConsumer.build().recycleBuffer();
			if (bufferConsumer.isFinished()) {
				bufferConsumer.close();
			} else {
				unfinishedBuffer = bufferConsumer;
			}
			return true;
		}
	}

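	// While SUSPENDED, a flush only drains whatever the producer has written so far to the
	// current unfinished buffer.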
	@Override
	public void flush() {
		synchronized (buffers) {
			if (state != State.SUSPENDED) {
				super.flush();
			} else {
				if (unfinishedBuffer != null) {
					unfinishedBuffer.build().recycleBuffer();
					if (unfinishedBuffer.isFinished()) {
						unfinishedBuffer.close();
						unfinishedBuffer = null;
					}
				}
			}
		}
	}

	@Override
	public void finish() throws IOException {
		if (state == State.SUSPENDED) {
			// If a draining subpartition is finished, keep the end-of-partition (EOP) event in buffers
			LOG.info("Draining subpartition {} is finished", super.toString());
		}
		super.finish();
	}

	@Override
	public PipelinedSubpartitionView createReadView(BufferAvailabilityListener availabilityListener) throws IOException {
		synchronized (buffers) {
			checkState(!isReleased());

			LOG.debug("Creating read view for subpartition {} of partition {}.", index, parent.getPartitionId());

			return new DrainablePipelinedSubpartitionView(this, availabilityListener, false);
		}
	}

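	// Switches to SUSPENDED and starts draining: all finished buffers are dropped and, if the
	// given view is null or matches the attached one, that view is failed with a
	// ConsumptionDeclinedException; a non-matching view is ignored.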
	@Override
	public void suspend(@Nullable ResultSubpartitionView view) {
		synchronized (buffers) {
			if (state != State.SUSPENDED) {
				if (readView != null) {
					if (view == null || readView.equals(view)) {
						LOG.info("{} is suspended, start draining now", this);
						state = State.SUSPENDED;
						clearFinishedBuffer();

						readView.releaseAllResources(new ConsumptionDeclinedException(parent.getPartitionId()));
						readView.notifyDataAvailable();
						readView = null;
					} else {
						LOG.info("Suspending is ignored because {} does not match the attached {}", view, readView);
					}
				} else {
					LOG.info("{} is suspended, start draining now", this);
					state = State.SUSPENDED;
					clearFinishedBuffer();
				}
			}
		}
	}

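	// Ends draining: leftover finished and unfinished buffers are dropped, a still-attached
	// stale view is failed, the given view is attached, and the state switches to CONSUMING.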
	@Override
	public void allowConsuming(ResultSubpartitionView view) {
		synchronized (buffers) {
			if (state != State.INITIALIZED) {
				LOG.info("{} is allowed to be consumed", this);
				// make sure no incomplete buffer exists when reconnecting
				clearFinishedBuffer();
				checkState(buffers.isEmpty(), "Buffer should be empty after draining");
				if (unfinishedBuffer != null) {
					unfinishedBuffer.build().recycleBuffer();
					checkState(unfinishedBuffer.isFinished(), "Buffer should be finished after draining");
					unfinishedBuffer.close();
					unfinishedBuffer = null;
				}
			}
			if (state == State.CONSUMING) {
				checkState(readView != null, "There should be a view attached in consuming state");
				LOG.warn("There is still a view {} attached, fail it", readView);
				readView.releaseAllResources(new ConsumptionDeclinedException(parent.getPartitionId()));
				readView.notifyDataAvailable();
			}
			this.readView = (DrainablePipelinedSubpartitionView) view;
			((DrainablePipelinedSubpartitionView) view).allowConsuming();

			state = State.CONSUMING;

			updateFlushRequestedFlag(false);
			if (!buffers.isEmpty()) {
				readView.notifyDataAvailable();
			}
		}
	}

	@Override
	public State getState() {
		return state;
	}

	@Override
	public String toString() {
		return String.format("Drainable [ with state %s ] %s ", state.toString(), super.toString());
	}

	@VisibleForTesting
	BufferConsumer getUnfinishedBuffer() {
		return unfinishedBuffer;
	}

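	// Consumes every queued BufferConsumer, closing and removing the finished ones; unfinished
	// consumers stay in the queue. Returns the number of buffers removed.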
	private int clearFinishedBuffer() {
		int drainedBufferCount = 0;
		final Iterator<BufferConsumer> it = buffers.iterator();
		while (it.hasNext()) {
			final BufferConsumer buffer = it.next();
			buffer.build().recycleBuffer();
			if (buffer.isFinished()) {
				buffer.close();
				it.remove();
				drainedBufferCount++;
			}
		}
		return drainedBufferCount;
	}
}