/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.state.gemini.engine.page;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.state.gemini.engine.GRegion;
import org.apache.flink.runtime.state.gemini.engine.exceptions.GeminiRuntimeException;
import org.apache.flink.runtime.state.gemini.engine.memstore.GSValue;
import org.apache.flink.runtime.state.gemini.engine.page.bmap.BinaryKey;
import org.apache.flink.runtime.state.gemini.engine.page.bmap.BinaryValue;
import org.apache.flink.runtime.state.gemini.engine.page.bmap.GBinaryHashMap;
import org.apache.flink.runtime.state.gemini.engine.rm.ReferenceCount.ReleaseType;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.EventExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.flink.runtime.state.gemini.engine.page.bmap.GBinaryHashMap.EMPTY_G_BINARY_HASHMAP;
/**
 * PageStoreHashKVImpl.
 *
 * <p>Hash-based page store for plain key-value (KV) data pages. Each key maps via
 * {@code pageIndex} to a {@link LogicChainedPage}: a chain of data pages in which a
 * higher chain index holds newer writes. Point reads therefore walk the chain from the
 * newest link downwards and stop at the first hit; bulk reads rely on
 * {@code putIfAbsent} while scanning newest-to-oldest so the newest version wins.
 *
 * <p>NOTE(review): generic type arguments in this file appear to have been stripped by
 * an extraction step (e.g. {@code Map fetchedDataPageMap}, {@code Map> container},
 * {@code List>>}); as written this will not compile — restore the type parameters from
 * the original source before building.
 */
public class PageStoreHashKVImpl extends AbstractHashPageStore {
	private static final Logger LOG = LoggerFactory.getLogger(PageStoreHashKVImpl.class);

	public PageStoreHashKVImpl(
		GRegion gRegion, EventExecutor eventExecutor) {
		super(gRegion, eventExecutor);
	}

	public PageStoreHashKVImpl(
		GRegion gRegion, PageIndex pageIndex, EventExecutor eventExecutor) {
		super(gRegion, pageIndex, eventExecutor);
	}

	/**
	 * Returns the current value for {@code key}, or {@code null} when the key is absent,
	 * deleted, or its state is filtered out (e.g. expired) by the region's state filter.
	 *
	 * @param key the key to look up
	 * @return the newest live value, or {@code null}
	 * @throws GeminiRuntimeException if the DB leaves normal status during the lookup
	 */
	@Override
	public V get(K key) {
		final PageIndexContext pageIndexContext = pageIndex.getPageIndexContext(key, false);
		final LogicChainedPage logicPageID = pageIndexContext.getPageID();
		// No page chain for this key's bucket: the key was never written.
		if (isNullPage(logicPageID)) {
			return null;
		}
		// Walk the chain from the newest page (highest chain index) down to the oldest.
		int curIndex = logicPageID.getCurrentPageChainIndex();
		V finalResult = null;
		// Pages fetched during this lookup; handed to the read-triggered compaction below.
		Map fetchedDataPageMap = new HashMap<>();
		while (curIndex >= 0 && gContext.isDBNormal()) {
			DataPage dataPage = getDataPageAutoLoadIfNeed(logicPageID, curIndex, fetchedDataPageMap);
			// Request-count bookkeeping feeds cache and compaction decisions.
			logicPageID.getPageAddress(curIndex).addRequestCount(1);
			gRegionContext.getPageStoreStats().addPageRequestCount(1);
			GSValue result = (GSValue) dataPage.get(key);
			dataPage.delReferenceCount(ReleaseType.Normal);
			if (result != null) {
				// A delete tombstone, or a state rejected by the filter (seqID-based, e.g.
				// TTL-expired), shadows any older versions further down the chain: stop
				// here and report the key as absent.
				if (result.getValueType() == GValueType.Delete || gRegionContext.filterState(result.getSeqID())) {
					break;
				}
				finalResult = result.getValue();
				break;
			}
			curIndex--;
		}
		if (!gContext.isDBNormal()) {
			throw new GeminiRuntimeException("DB is in abnormal status.");
		}
		// A read may schedule compaction of this chain based on the collected statistics.
		tryLaunchCompactionByRead(pageIndexContext, logicPageID, fetchedDataPageMap);
		return finalResult;
	}

	/**
	 * Loads every live (non-filtered) entry of this store into {@code container}.
	 *
	 * <p>Each chain is scanned newest-to-oldest and entries are inserted with
	 * {@code putIfAbsent}, so the newest version of each key wins. Unlike {@link #get},
	 * this deliberately does not update the read cache or trigger compaction (see the
	 * in-line note about the mini-batch usage pattern).
	 *
	 * @param container the map to fill; existing entries are never overwritten
	 */
	@Override
	public void getAll(Map> container) {
		// as we know, removeAll will happen after getAll in mini batch(KeyedBundleOperator), so
		// there is no need to update read cache and trigger compaction
		LogicChainedPage[] chains = pageIndex.getPageIndex();
		for (LogicChainedPage logicChainedPage : chains) {
			if (isNullPage(logicChainedPage)) {
				continue;
			}
			int numPages = logicChainedPage.getCurrentPageChainIndex();
			for (int i = numPages; i >= 0; i--) {
				PageAddress pageAddress = logicChainedPage.getPageAddress(i);
				DataPage dataPage = pageAddress.getDataPage();
				try {
					if (dataPage == null) {
						// Page is not resident: count the miss and fetch it via the policy.
						this.cacheManager.getCacheStats().addPageCacheMissCount();
						dataPage = this.gContext.getSupervisor().getFetchPolicy().fetch(pageAddress,
							logicChainedPage,
							i,
							this.gRegionContext,
							this.gRegionContext.getGContext().getGConfiguration().getEnablePrefetch(),
							false);
					} else {
						this.cacheManager.getCacheStats().addPageCacheHitCount();
					}
					Map> data = dataPage.getPOJOMap();
					for (Map.Entry> entry : data.entrySet()) {
						// Skip filtered (e.g. expired) states; putIfAbsent preserves the
						// newer value already inserted from a later page in the chain.
						if (!gRegionContext.filterState(entry.getValue().getSeqID())) {
							container.putIfAbsent(entry.getKey(), entry.getValue());
						}
					}
				} finally {
					// Always release the page reference, even if decode/iteration throws.
					if (dataPage != null) {
						dataPage.delReferenceCount(ReleaseType.Normal);
					}
				}
			}
		}
	}

	/**
	 * Compacts a list of data pages (supplied newest-first) into a single page.
	 *
	 * <p>The backing binary maps are merged from the oldest element (last in the list)
	 * to the newest (first), so later {@code putAll} calls let newer entries overwrite
	 * older ones.
	 *
	 * @param isMajor whether this is a major compaction
	 * @param canCompactPageListReversedOrder pages to merge, newest first; must be non-empty
	 * @param version version assigned to the compacted page
	 * @param logicPageId logic page id of the resulting page
	 * @return the compacted page, or {@code null} if all entries compacted away
	 * @throws GeminiRuntimeException if the input list is null or empty
	 */
	@Override
	public DataPage doCompactPage(
		boolean isMajor, List canCompactPageListReversedOrder, long version, int logicPageId) {
		if (canCompactPageListReversedOrder == null || canCompactPageListReversedOrder.size() == 0) {
			throw new GeminiRuntimeException("Internal BUG");
		}
		// Collect each page's backing binary map, preserving the newest-first order.
		List> compactionListReversedOrder = new ArrayList<>();
		for (DataPage dataPage : canCompactPageListReversedOrder) {
			compactionListReversedOrder.add(dataPage.getGBinaryHashMap());
		}
		int index = compactionListReversedOrder.size() - 1;
		Map newMap;
		long compactionCount = 0;
		if (gContext.hasTtl()) {
			// With TTL enabled, merge into a fresh map — presumably so the state filter
			// passed to ofBinaryList below sees every entry; TODO(review) confirm.
			newMap = new HashMap<>();
		} else {
			// Without TTL, reuse the oldest page's map as the merge base to save a copy.
			newMap = compactionListReversedOrder.get(index).getBinaryMap();
			compactionCount += compactionListReversedOrder.get(index).getCompactionCount();
			index--;
		}
		// Merge oldest -> newest so newer entries overwrite older ones.
		while (index >= 0) {
			newMap.putAll(compactionListReversedOrder.get(index).getBinaryMap());
			compactionCount += compactionListReversedOrder.get(index).getCompactionCount();
			index--;
		}
		GBinaryHashMap gBinaryHashMap = GBinaryHashMap.ofBinaryList(DataPage.DataPageType.KV,
			isMajor,
			version,
			logicPageId,
			this.pageSerdeFlink.getKeySerde(),
			gContext.getSupervisor().getAllocator(),
			newMap,
			compactionCount,
			gContext.getStateFilter(),
			gRegionContext);
		//TODO null should be handled by PageStore
		return gBinaryHashMap == EMPTY_G_BINARY_HASHMAP
			? null
			: new DataPageKVImpl<>(gBinaryHashMap, this.pageSerdeFlink.getValueSerde());
	}

	/**
	 * Not supported for the plain KV page store; value-level compaction has no meaning
	 * here, so reaching this method indicates a caller bug.
	 */
	@Override
	BinaryValue doCompactValue(List binaryValueList, boolean isMajor, long version, int logicPageId) {
		throw new GeminiRuntimeException("Internal Bug");
	}

	/**
	 * Not supported for the plain KV page store; KV compaction goes through
	 * {@link #doCompactPage} instead, so reaching this method indicates a caller bug.
	 */
	@Override
	protected DataPage doBuildDataPageFromGBinaryMap(
		boolean isMajor,
		long version,
		int logicPageId,
		TypeSerializer keySerde,
		Map finalCompactedMap,
		long compactionCount) {
		throw new GeminiRuntimeException("Internal Bug");
	}

	/**
	 * Sums the per-entry request counts (field {@code f1} of each tuple) over the data set.
	 */
	@Override
	long getRequestCount(List>> dataSet) {
		return dataSet.stream().map((value) -> value.f1.getRequestCount()).reduce(0, (a, b) -> a + b);
	}

	/**
	 * Builds a new KV data page from the given entries.
	 *
	 * @return the new page, or {@code null} if the resulting binary map is empty
	 */
	@Override
	DataPage doCreateDataPage(long version, List>> dataSet, int logicPageId) {
		GBinaryHashMap gBinaryHashMap = GBinaryHashMap.of(DataPage.DataPageType.KV,
			dataSet,
			this.pageSerdeFlink.getKeySerde(),
			this.pageSerdeFlink.getValueSerde(),
			version,
			logicPageId,
			gContext.getSupervisor().getAllocator(),
			1,
			gContext.getInPageGCompressAlgorithm());
		return gBinaryHashMap == EMPTY_G_BINARY_HASHMAP
			? null
			: new DataPageKVImpl<>(gBinaryHashMap, this.pageSerdeFlink.getValueSerde());
	}
}