/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.state.gemini.engine.dbms;

import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.gemini.engine.GConfiguration;
import org.apache.flink.runtime.state.gemini.engine.GRegionContext;
import org.apache.flink.runtime.state.gemini.engine.GTable;
import org.apache.flink.runtime.state.gemini.engine.exceptions.GeminiRuntimeException;
import org.apache.flink.runtime.state.gemini.engine.filecache.FileCache;
import org.apache.flink.runtime.state.gemini.engine.filecache.InfiniteCapacityFileCache;
import org.apache.flink.runtime.state.gemini.engine.filecache.NoCapacityFileCache;
import org.apache.flink.runtime.state.gemini.engine.fs.FileCleaner;
import org.apache.flink.runtime.state.gemini.engine.fs.FileCleanerImpl;
import org.apache.flink.runtime.state.gemini.engine.fs.FileManager;
import org.apache.flink.runtime.state.gemini.engine.fs.FileManagerImpl;
import org.apache.flink.runtime.state.gemini.engine.handler.GeminiEventExecutorGroup;
import org.apache.flink.runtime.state.gemini.engine.memstore.WriteBufferManager;
import org.apache.flink.runtime.state.gemini.engine.memstore.WriteBufferManagerImpl;
import org.apache.flink.runtime.state.gemini.engine.page.DataPage;
import org.apache.flink.runtime.state.gemini.engine.page.DfsDataPageUtil;
import org.apache.flink.runtime.state.gemini.engine.page.LocalDataPageUtil;
import org.apache.flink.runtime.state.gemini.engine.page.PageAddress;
import org.apache.flink.runtime.state.gemini.engine.rm.Allocator;
import org.apache.flink.runtime.state.gemini.engine.rm.PoolAllocatorNettyImpl;
import org.apache.flink.runtime.state.gemini.engine.rm.ReferenceCount.ReleaseType;
import org.apache.flink.runtime.state.gemini.engine.rm.UnpoolAllocatorImpl;
import org.apache.flink.runtime.state.gemini.engine.snapshot.BackendSnapshotMeta;
import org.apache.flink.runtime.state.gemini.engine.snapshot.SnapshotManager;
import org.apache.flink.runtime.state.gemini.engine.snapshot.SnapshotManagerImpl;
import org.apache.flink.runtime.state.gemini.engine.vm.CacheManager;
import org.apache.flink.runtime.state.gemini.engine.vm.CacheManagerImpl;
import org.apache.flink.runtime.state.gemini.engine.vm.DataPageLRU;
import org.apache.flink.runtime.state.gemini.engine.vm.FetchPolicy;
import org.apache.flink.runtime.state.gemini.engine.vm.FetchPolicyImpl;
import org.apache.flink.runtime.state.gemini.engine.vm.FutureDataPage;
import org.apache.flink.util.Preconditions;

import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.EventExecutorGroup;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadFactory;

/**
 * Implementation of {@link Supervisor} that creates and wires together the allocators, write
 * buffer manager, cache manager, file managers, snapshot manager and executor groups of a Gemini DB instance.
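 *
 * <p>A minimal usage sketch, assuming a fully initialized {@code GContext}:
 * <pre>{@code
 * Supervisor supervisor = new SupervisorImpl(gContext);
 * supervisor.start();
 * // ... access the write buffer manager, caches, file and snapshot managers ...
 * supervisor.close();
 * }</pre>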
 */
public class SupervisorImpl implements Supervisor {
	private static final Logger LOG = LoggerFactory.getLogger(SupervisorImpl.class);
	private final GContext gContext;

	private final Allocator allocator;

	//Used for values produced during compaction, such as list/map values; unpooled on-heap allocation is recommended here.
	private final Allocator defaultAllocator = new UnpoolAllocatorImpl();

	private final Allocator forReadAllocator;

	private final WriteBufferManager writeBufferManager;

	private final CacheManager cacheManager;

	private final SnapshotManager snapshotManager;

	private final FileManager localFileManager;

	private final FileManager dfsFileManager;

	private final FileCache fileCache;

	private final FileCleaner fileCleaner;

	private final EventExecutorGroup regionExecutorGroup;

	private final EventExecutorGroup flusherExecutorGroup;

	//TODO #SR unify the other executor groups after switching to the deaggregative mode.
	//TODO #SR the current writer is only closed by the EventExecutorGroup, so multiple checkpoints
	//  will reuse the same underlying file. Do we need to handle this?
	private final EventExecutorGroup snapshotExecutorGroup;

	private final EventExecutorGroup compactionExecutorGroup;

	private final FetchPolicy fetchPolicy;

	public final DiscardOrEvictPageReleaseManager discardOrEvictPageReleaseManager;

	public SupervisorImpl(GContext gContext) {
		gContext.setSupervisor(this);
		this.gContext = Preconditions.checkNotNull(gContext);

		this.writeBufferManager = new WriteBufferManagerImpl(gContext);

		GConfiguration gConfiguration = gContext.getGConfiguration();

		this.cacheManager = new CacheManagerImpl(this.gContext);

		discardOrEvictPageReleaseManager = new DiscardOrEvictPageReleaseManager(this.gContext);

		//Allocator for normal use, such as writing pages.
		if (gConfiguration.getUseOffheap()) {
			//Off-heap is used globally, so reads share the same pooled allocator.
			forReadAllocator = new PoolAllocatorNettyImpl(gConfiguration, discardOrEvictPageReleaseManager);
			this.allocator = forReadAllocator;
		} else {
			this.allocator = new UnpoolAllocatorImpl();
			//For reads, the pooled off-heap allocator is recommended because it provides better performance for IO access.
			if (gConfiguration.getForceReadUseOffheap()) {
				forReadAllocator = new PoolAllocatorNettyImpl(gConfiguration, discardOrEvictPageReleaseManager);
			} else {
				forReadAllocator = allocator;
			}
		}

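		// Create the file cleaner, then a local and a DFS file manager rooted at the configured
		// local and DFS paths; both managers are registered with the cleaner.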
		this.fileCleaner = new FileCleanerImpl(gContext);

		this.localFileManager = new FileManagerImpl(gContext,
			"local",
			new Path(gConfiguration.getLocalPath()),
			false,
			new LocalDataPageUtil(forReadAllocator, gConfiguration.isChecksumEnable()));
		this.fileCleaner.registerFileManager(this.localFileManager);

		this.dfsFileManager = new FileManagerImpl(gContext,
			"dfs",
			new Path(gConfiguration.getDfsPath()),
			true,
			new DfsDataPageUtil(gConfiguration.isChecksumEnable()));
		this.fileCleaner.registerFileManager(this.dfsFileManager);

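		// The configured capacity decides how flushed pages are cached: INFINITE_CAPACITY uses both
		// the local and the DFS file manager, while a non-positive capacity uses the DFS file manager only.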
		// TODO create different file cache according to configuration
		long fileCacheCapacity = gConfiguration.getFileCacheCapacity();
		if (fileCacheCapacity == FileCache.INFINITE_CAPACITY) {
			this.fileCache = new InfiniteCapacityFileCache(gContext, localFileManager, dfsFileManager);
		} else if (fileCacheCapacity <= 0) {
			this.fileCache = new NoCapacityFileCache(gContext, dfsFileManager);
		} else {
			throw new GeminiRuntimeException("unsupported file cache capacity " + fileCacheCapacity);
		}

		this.snapshotManager = new SnapshotManagerImpl(this.gContext, writeBufferManager, localFileManager, dfsFileManager);
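		// The fetch policy is backed by an LRU of fetched data pages: entries are weighted by their
		// page size and notified via removed() when they are evicted.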
		this.fetchPolicy = new FetchPolicyImpl(gContext,
			cacheManager.getCacheStats(),
			new DataPageLRU<>(cacheManager.getReadPageCacheLRUSize(),
				new DataPageLRU.DataPageLRUFuction<FutureDataPage>() {
					@Override
					public int size(FutureDataPage value) {
						return value.getSize();
					}

					@Override
					public void removed(FutureDataPage value) {
						value.removed();
					}
				}));

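		// Four dedicated executor groups handle region work, flushing, snapshots and compaction.
		// Each uses its own configured thread count and the common sleep interval.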
		String prefix = gConfiguration.getExcetorPrefixName();
		ThreadFactory regionThreadFactory = new ThreadFactoryBuilder().setNameFormat(prefix + "geminiRegion-%d").build();
		this.regionExecutorGroup = new GeminiEventExecutorGroup(gContext.getGConfiguration().getRegionThreadNum(),
			regionThreadFactory,
			gContext.getGConfiguration().getCommonThreadSleepTimeNs(), gContext);
		ThreadFactory flushThreadFactory = new ThreadFactoryBuilder().setNameFormat(prefix + "geminiFlush-%d").build();
		this.flusherExecutorGroup = new GeminiEventExecutorGroup(gContext.getGConfiguration().getFlushThreadNum(),
			flushThreadFactory,
			gContext.getGConfiguration().getCommonThreadSleepTimeNs(), gContext);
		ThreadFactory snapshotThreadFactory = new ThreadFactoryBuilder().setNameFormat(prefix + "geminiSnapshot-%d").build();
		this.snapshotExecutorGroup = new GeminiEventExecutorGroup(gContext.getGConfiguration().getSnapshotThreadNum(),
			snapshotThreadFactory,
			gContext.getGConfiguration().getCommonThreadSleepTimeNs(), gContext);
		ThreadFactory compactionThreadFactory = new ThreadFactoryBuilder().setNameFormat(prefix + "geminiCompaction-%d").build();
		this.compactionExecutorGroup = new GeminiEventExecutorGroup(gContext.getGConfiguration().getCompactionThreadNum(),
			compactionThreadFactory,
			gContext.getGConfiguration().getCommonThreadSleepTimeNs(), gContext);
		LOG.info("Supervisor is created");
	}

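	// Starts the background components: the cache manager, the file cleaner and both file managers.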
	@Override
	public void start() {
		this.cacheManager.start();
		this.fileCleaner.start();
		this.localFileManager.start();
		this.dfsFileManager.start();
		LOG.info("Supervisor is started");
	}

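	// Shuts down the executor groups gracefully first, then closes the remaining components,
	// logging (but not propagating) any failure.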
	@Override
	public void close() {
		regionExecutorGroup.shutdownGracefully();
		flusherExecutorGroup.shutdownGracefully();
		compactionExecutorGroup.shutdownGracefully();
		snapshotExecutorGroup.shutdownGracefully();
		closeQuietly(fetchPolicy, "FetchPolicy");
		closeQuietly(cacheManager, "CacheManager");
		closeQuietly(snapshotManager, "SnapshotManager");
		closeQuietly(fileCache, "FileCache");
		closeQuietly(localFileManager, "LocalFileManager");
		closeQuietly(dfsFileManager, "DFSFileManager");
		closeQuietly(fileCleaner, "FileCleaner");
	}

	@Override
	public void startSnapshot(BackendSnapshotMeta backendSnapshotMeta) throws IOException {
		snapshotManager.startSnapshot(backendSnapshotMeta);
	}

	@Override
	public SnapshotManager.PendingSnapshot getPendingSnapshot(long checkpointId) {
		return snapshotManager.getPendingSnapshot(checkpointId);
	}

	@Override
	public Allocator getAllocator() {
		return allocator;
	}

	@Override
	public Allocator getDefaultAllocator() {
		return defaultAllocator;
	}

	@Override
	public Allocator getForReadAllocator() {
		return forReadAllocator;
	}

	@Override
	public WriteBufferManager getWriteBufferManager() {
		return this.writeBufferManager;
	}

	@Override
	public CacheManager getCacheManager() {
		return this.cacheManager;
	}

	@Override
	public SnapshotManager getSnapshotManager() {
		return this.snapshotManager;
	}

	@Override
	public FileManager getLocalFileManager() {
		return localFileManager;
	}

	@Override
	public FileManager getDfsFileManager() {
		return dfsFileManager;
	}

	@Override
	public FileCache getFileCache() {
		return fileCache;
	}

	@Override
	public FileCleaner getFileCleaner() {
		return fileCleaner;
	}

	@Override
	public EventExecutorGroup getRegionExecutorGroup() {
		return regionExecutorGroup;
	}

	@Override
	public EventExecutorGroup getFlushExecutorGroup() {
		return flusherExecutorGroup;
	}

	@Override
	public EventExecutorGroup getSnapshotExecutorGroup() {
		return snapshotExecutorGroup;
	}

	@Override
	public EventExecutorGroup getCompactionExecutorGroup() {
		return compactionExecutorGroup;
	}

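	/**
	 * Discards the given pages: they are first removed from the file cache, then their in-memory
	 * data pages are released. Buffers that still hold additional references are handed to the
	 * {@link DiscardOrEvictPageReleaseManager} so that their release can be tracked.
	 */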
	@Override
	public void discardPage(GRegionContext gRegionContext, List<PageAddress> pageAddressList) {
		for (PageAddress pageAddress : pageAddressList) {
			fileCache.discardPage(pageAddress, gRegionContext, null);
		}
		//TODO reduce the GByteBuffer reference count. When discarding a page whose reference count is
		//  not 0, keep monitoring it to make sure it can eventually be removed.
		for (PageAddress pageAddress : pageAddressList) {
			DataPage dataPage = pageAddress.getDataPageNoReference();
			if (dataPage != null) {
				if (dataPage.getGBinaryHashMap().getGByteBuffer().getCnt() != 1) {
					discardOrEvictPageReleaseManager.addMonitorPageStillHaveReference(dataPage.getGBinaryHashMap().getGByteBuffer(),
						ReleaseType.Discard,
						pageAddress);
				}
				dataPage.delReferenceCount(ReleaseType.Discard);
			}
		}
	}

	@Override
	public Map<String, GTable> getAllTables() {
		return gContext.getGeminiDB().getGeminiTableMap();
	}

	@Override
	public FetchPolicy getFetchPolicy() {
		return this.fetchPolicy;
	}

	@Override
	public DiscardOrEvictPageReleaseManager getDiscardOrEvictPageReleaseManager() {
		return discardOrEvictPageReleaseManager;
	}

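	// Closes the given resource, logging any failure instead of propagating it.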
	private void closeQuietly(Closeable closeable, String closeableName) {
		try {
			closeable.close();
		} catch (Exception e) {
			LOG.error("Failed to close {}", closeableName, e);
		}
	}
}