org.apache.flink.runtime.state.gemini.engine.fs.PersistHugePageToLocalSync

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.state.gemini.engine.fs;

import org.apache.flink.runtime.state.gemini.engine.GRegion;
import org.apache.flink.runtime.state.gemini.engine.GRegionContext;
import org.apache.flink.runtime.state.gemini.engine.dbms.GContext;
import org.apache.flink.runtime.state.gemini.engine.dbms.Supervisor;
import org.apache.flink.runtime.state.gemini.engine.exceptions.GeminiRuntimeException;
import org.apache.flink.runtime.state.gemini.engine.page.PageAddress;
import org.apache.flink.runtime.state.gemini.engine.vm.EvictPolicy;

import org.apache.flink.shaded.guava18.com.google.common.base.MoreObjects;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.EventExecutor;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.EventExecutorGroup;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiConsumer;

/**
 * PersistHugePageToLocalSync synchronously writes huge pages to local disk before eviction starts,
 * so that their in-memory copies can be released once the local copies are valid.
 */
public class PersistHugePageToLocalSync implements PersistenceStrategy {
	private static final Logger LOG = LoggerFactory.getLogger(PersistHugePageToLocalSync.class);
	private final EventExecutorGroup flushEventExecutorGroup;
	private final GContext gContext;
	private final Supervisor supervisor;
	private final EvictPolicy evictPolicy;
	private final int forceFlushCacheSize;

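	// Metrics: in-flight persist tasks, bytes currently being persisted, and total bytes persisted so far.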
	private final AtomicLong runningPersistTask = new AtomicLong(0);
	private final AtomicLong runningPersistPageSize = new AtomicLong(0);
	private final AtomicLong totalPersistPageSize = new AtomicLong(0);

	public PersistHugePageToLocalSync(GContext gContext) {
		this.gContext = gContext;
		this.flushEventExecutorGroup = gContext.getSupervisor().getFlushExecutorGroup();
		this.supervisor = gContext.getSupervisor();
		this.forceFlushCacheSize = gContext.getGConfiguration().getForceSyncToCacheSize();
		this.evictPolicy = this.supervisor.getCacheManager().getEvictPolicy();
	}

	@Override
	public void persistPage(GRegion gRegion, PageAddress pageAddress, int compactedMemSize) {
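		// Pages smaller than the force-flush threshold stay in memory; only huge pages are persisted eagerly.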
		if (compactedMemSize < forceFlushCacheSize) {
			return;
		}
		// In this case the huge page does not influence eviction, so we return directly.
		if (this.evictPolicy.getMemoryUsedWaterMark(gRegion,
			compactedMemSize).getShortValue() < EvictPolicy.MemoryUsedWaterMark.Middle.getShortValue()) {
			return;
		}
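		// Pick a flush executor and collect every page behind this address, each paired with its region context.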
		final EventExecutor flushEventExecutor = flushEventExecutorGroup.next();
		Iterator<PageAddress> pageAddressIterator = pageAddress.pageIterator();
		List<PageAddress> pageAddressList = new ArrayList<>();
		List<GRegionContext> gRegionContextList = new ArrayList<>();
		List<BiConsumer<Boolean, Throwable>> callBacks = new ArrayList<>();
		while (pageAddressIterator.hasNext()) {
			PageAddress pageAddressSingle = pageAddressIterator.next();
			pageAddressList.add(pageAddressSingle);
			gRegionContextList.add(gRegion.getGRegionContext());
		}

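		// The latch makes the flush synchronous; the counters track in-flight tasks and persisted bytes for toString() reporting.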
		CountDownLatch countDownLatch = new CountDownLatch(1);
		runningPersistTask.incrementAndGet();
		runningPersistPageSize.addAndGet(compactedMemSize);
		totalPersistPageSize.addAndGet(compactedMemSize);

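		// Completion callback: on success drop the in-memory data pages whose local copies are valid; on failure record an internal DB error. Either way, release the latch.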
		BiConsumer<Boolean, Throwable> callBack = (success, throwable) -> {
			runningPersistTask.decrementAndGet();
			runningPersistPageSize.addAndGet(-compactedMemSize);
			if (!success) {
				LOG.error("persistPage flush local failed", throwable);
				gContext.setDBInternalError(new GeminiRuntimeException("persistPage flush local failed, " + throwable));
			} else {
				for (PageAddress pageAddressSingle : pageAddressList) {
					//TODO we could pick some pages to keep in memory.
					if (pageAddressSingle.isLocalValid()) {
						pageAddressSingle.setDataPage(null);
					} else {
						LOG.error("persistPage flush local {}, but it's local invalid", pageAddressSingle);
					}
				}
			}
			countDownLatch.countDown();
		};
		callBacks.add(callBack);

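		// Hand the batch to the file cache, which writes the pages on the chosen flush executor and then invokes the callback.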
		this.supervisor.getFileCache().addBatchPages(pageAddressList,
			gRegionContextList,
			flushEventExecutor,
			callBacks);

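		// Block until the flush has completed so that persistence is synchronous for the caller.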
		try {
			countDownLatch.await();
		} catch (InterruptedException e) {
			LOG.error("persistPage failed", e);
			gContext.setDBInternalError(new GeminiRuntimeException("persistPage local failed, " + e));
		}
	}

	@Override
	public void close() throws IOException {

	}

	@Override
	public String toString() {
		return MoreObjects.toStringHelper(this).
			add("runningPersistTask", runningPersistTask.get()).
			add("runningPersistPageSize", runningPersistPageSize.get()).
			add("totalPersistPageSize", totalPersistPageSize.get()).toString();
	}
}