// org.apache.flink.runtime.state.gemini.engine.memstore.WriteBufferManagerImpl Maven / Gradle / Ivy
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.state.gemini.engine.memstore;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.runtime.state.gemini.engine.GRegion;
import org.apache.flink.runtime.state.gemini.engine.GTable;
import org.apache.flink.runtime.state.gemini.engine.dbms.GContext;
import org.apache.flink.runtime.state.gemini.engine.snapshot.SnapshotCompletableFuture;
import org.apache.flink.runtime.state.gemini.engine.snapshot.SnapshotManager;
import org.apache.flink.runtime.state.gemini.engine.snapshot.SnapshotOperation;
import org.apache.flink.util.Preconditions;
import org.apache.flink.shaded.guava18.com.google.common.base.MoreObjects;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static org.apache.flink.runtime.state.gemini.engine.metrics.HandlerMetrics.TOTAL_WR_BUFFER_FLUSH_BLOCK;
import static org.apache.flink.runtime.state.gemini.engine.metrics.HandlerMetrics.TOTAL_WR_BUFFER_FLUSH_RECORD_COUNT;
import static org.apache.flink.runtime.state.gemini.engine.metrics.HandlerMetrics.TOTAL_WR_BUFFER_FLUSH_SEGMENT_COUNT;
import static org.apache.flink.runtime.state.gemini.engine.metrics.HandlerMetrics.TOTAL_WR_BUFFER_RECORD_COUNT;
/**
* WriteBufferManager. Single instance per DB.
*/
public class WriteBufferManagerImpl implements WriteBufferManager {
	private static final Logger LOG = LoggerFactory.getLogger(WriteBufferManagerImpl.class);

	/** Lower bound for the total write-buffer budget: 64 MiB. */
	private static final long MIN_WRITE_BUFFER_TOTAL_SIZE = 64 * 1024 * 1024;

	private final GContext gContext;

	// Cumulative counters exposed as gauges in registerMetrics().
	private final AtomicLong totalWriteBufferFlushBlock = new AtomicLong(0);
	private final AtomicLong totalWriteBufferRecordCount = new AtomicLong(0);
	private final AtomicLong totalWriteBufferFlushingRecordCount = new AtomicLong(0);
	private final AtomicLong totalWriteBufferFlushingSegmentCount = new AtomicLong(0);

	/** Total write-buffer memory budget, derived from heap size and config rate, floored at 64 MiB. */
	private final long maxTotalWriteBufferSize;

	/** Number of tables registered via {@link #addTableNum(String)}. */
	private final AtomicInteger tableCount = new AtomicInteger(0);

	/** Number of regions owned by this DB instance (inclusive id range). */
	private final int regionCount;

	/** Per-table cap on concurrently flushing segments; see {@link #canFlushWriteBuffer(WriteBuffer)}. */
	private final int totalFlushingLimitPerTable;

	/**
	 * Creates the write-buffer manager for a DB instance.
	 *
	 * @param gContext the DB context providing configuration and region range; must not be null.
	 */
	public WriteBufferManagerImpl(GContext gContext) {
		this.gContext = Preconditions.checkNotNull(gContext);
		long totalHeapMemSize = gContext.getGConfiguration().getTotalHeapMemSize();
		long configTotalWriteBufferSize =
			(long) (totalHeapMemSize * gContext.getGConfiguration().getTotalWriteBufferRate());
		// Enforce the minimum budget so tiny heaps still get a workable write buffer.
		if (configTotalWriteBufferSize < MIN_WRITE_BUFFER_TOTAL_SIZE) {
			configTotalWriteBufferSize = MIN_WRITE_BUFFER_TOTAL_SIZE;
		}
		this.maxTotalWriteBufferSize = configTotalWriteBufferSize;
		// Region ids form an inclusive range, hence the +1.
		this.regionCount = gContext.getEndRegionId() - gContext.getStartRegionId() + 1;
		this.totalFlushingLimitPerTable = (int) (gContext.getGConfiguration().getNumFlushingSegment()
			* regionCount
			* gContext.getGConfiguration().getTotalNumFlushingSegmentRatio());
		LOG.info("WriteBufferManager maxTotalWriteBufferSize={}, regionCount={}, totalFlushingLimitPerTable={}",
			maxTotalWriteBufferSize,
			regionCount,
			totalFlushingLimitPerTable);
	}

	@Override
	public void increaseWriteBufferFlushBlock() {
		totalWriteBufferFlushBlock.incrementAndGet();
	}

	@Override
	public void addTotalRecordCount(int totalRecordCount) {
		totalWriteBufferRecordCount.addAndGet(totalRecordCount);
	}

	@Override
	public long getTotalRecordCount() {
		return totalWriteBufferRecordCount.get();
	}

	@Override
	public void addTotalFlushingRecordCount(int totalRecordCount) {
		totalWriteBufferFlushingRecordCount.addAndGet(totalRecordCount);
	}

	@Override
	public void addTotalFlushingSegmentCount(int flushingSegmentCount) {
		totalWriteBufferFlushingSegmentCount.addAndGet(flushingSegmentCount);
	}

	@Override
	public long getTotalMemSize() {
		return maxTotalWriteBufferSize;
	}

	@Override
	public boolean isBestChoiceWriteBufferFlushing(WriteBuffer writeBuffer) {
		//if best choice is not this writeBuffer, manager will flush the best choice writebuffer. and return false.
		//if best choice is this write buffer, directly return true. then writeBuffer will flush itself.
		//TODO judge this writeBuffer can be choose to flush. and choice the best one to flush.
		return true;
	}

	@Override
	public boolean canFlushWriteBuffer(WriteBuffer writeBuffer) {
		// Allow a flush while the global count of flushing segments is below the
		// aggregate per-table limit. Treat zero registered tables as one so the
		// limit never degenerates to 0 (which would block all flushes).
		int effectiveTables = Math.max(tableCount.get(), 1);
		return (long) totalFlushingLimitPerTable * effectiveTables
			> totalWriteBufferFlushingSegmentCount.get();
	}

	@Override
	public void addTableNum(String tableName) {
		int totalTables = tableCount.incrementAndGet();
		LOG.info("WriteBufferManager add tableName={}, now totalTables={}", tableName, totalTables);
	}

	/**
	 * Snapshots the write buffers of every region (data and index) of every table.
	 *
	 * <p>The running-task counter on the snapshot future is incremented before the
	 * per-region snapshots are issued and decremented afterwards, so the snapshot
	 * cannot complete while this dispatch is still in progress.
	 *
	 * @param snapshotOperation the snapshot operation carrying the pending snapshot state.
	 */
	@Override
	public void doSnapshot(SnapshotOperation snapshotOperation) {
		SnapshotManager.PendingSnapshot pendingSnapshot = snapshotOperation.getPendingSnapshot();
		long checkpointId = pendingSnapshot.getCheckpointId();
		SnapshotCompletableFuture snapshotCompletableFuture = pendingSnapshot.getResultFuture();
		snapshotCompletableFuture.incRunningTask();
		LOG.info("WriteBufferManagerImpl start to snapshot for {}", checkpointId);
		// NOTE(review): assumes the table map is keyed by table name — confirm against GeminiDB.
		Map<String, GTable> tables = gContext.getGeminiDB().getGeminiTableMap();
		for (GTable table : tables.values()) {
			Iterator<GRegion> regionIterator = table.regionIterator();
			while (regionIterator.hasNext()) {
				regionIterator.next().getWriteBuffer().doSnapshot(snapshotOperation);
			}
			Iterator<GRegion> indexRegionIterator = table.indexRegionIterator();
			while (indexRegionIterator.hasNext()) {
				indexRegionIterator.next().getWriteBuffer().doSnapshot(snapshotOperation);
			}
		}
		snapshotCompletableFuture.decRunningTask();
		LOG.info("WriteBufferManagerImpl end snapshot for {}", checkpointId);
	}

	@Override
	public void registerMetrics(MetricGroup metricGroup) {
		metricGroup.gauge(TOTAL_WR_BUFFER_FLUSH_BLOCK, (Gauge<Long>) totalWriteBufferFlushBlock::get);
		metricGroup.gauge(TOTAL_WR_BUFFER_RECORD_COUNT, (Gauge<Long>) totalWriteBufferRecordCount::get);
		metricGroup.gauge(TOTAL_WR_BUFFER_FLUSH_RECORD_COUNT,
			(Gauge<Long>) totalWriteBufferFlushingRecordCount::get);
		metricGroup.gauge(TOTAL_WR_BUFFER_FLUSH_SEGMENT_COUNT,
			(Gauge<Long>) totalWriteBufferFlushingSegmentCount::get);
	}

	@Override
	public String toString() {
		return MoreObjects.toStringHelper(this).
			add("totalWriteBufferFlushBlock", totalWriteBufferFlushBlock).
			add("totalWriteBufferRecordCount", totalWriteBufferRecordCount).
			add("totalWriteBufferFlushingRecordCount", totalWriteBufferFlushingRecordCount).
			add("totalWriteBufferFlushingSegmentCount", totalWriteBufferFlushingSegmentCount).
			add("maxTotalWriteBufferSize", maxTotalWriteBufferSize).
			add("tableCount", tableCount).
			add("regionCount", regionCount).toString();
	}
}