/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.metadata.impl.batching;

import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.extern.slf4j.Slf4j;
import org.apache.pulsar.metadata.api.GetResult;
import org.apache.pulsar.metadata.api.MetadataEventSynchronizer;
import org.apache.pulsar.metadata.api.MetadataStoreConfig;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.Stat;
import org.apache.pulsar.metadata.api.extended.CreateOption;
import org.apache.pulsar.metadata.impl.AbstractMetadataStore;
import org.apache.pulsar.metadata.impl.stats.BatchMetadataStoreStats;
import org.jctools.queues.MessagePassingQueue;
import org.jctools.queues.MpscUnboundedArrayQueue;
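
/**
 * Metadata store base class that batches individual read and write operations into grouped
 * requests to the underlying store.
 *
 * <p>When batching is enabled, operations are placed on separate read and write queues and
 * flushed either by a periodic task running every {@code batchingMaxDelayMillis}, or eagerly
 * once a queue holds more than {@code batchingMaxOperations} entries. Write batches are also
 * capped at {@code batchingMaxSizeKb}. When batching is disabled, every operation is executed
 * individually.
 */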
@Slf4j
public abstract class AbstractBatchedMetadataStore extends AbstractMetadataStore {

    private final ScheduledFuture<?> scheduledTask;
    private final MessagePassingQueue<MetadataOp> readOps;
    private final MessagePassingQueue<MetadataOp> writeOps;
private final AtomicBoolean flushInProgress = new AtomicBoolean(false);
private final boolean enabled;
private final int maxDelayMillis;
private final int maxOperations;
private final int maxSize;
private MetadataEventSynchronizer synchronizer;
private final BatchMetadataStoreStats batchMetadataStoreStats;
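
    // Batching parameters are read from the MetadataStoreConfig; when batching is disabled,
    // no queues or periodic flush task are created and operations are executed one by one.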
protected AbstractBatchedMetadataStore(MetadataStoreConfig conf) {
super(conf.getMetadataStoreName());
this.enabled = conf.isBatchingEnabled();
this.maxDelayMillis = conf.getBatchingMaxDelayMillis();
this.maxOperations = conf.getBatchingMaxOperations();
this.maxSize = conf.getBatchingMaxSizeKb() * 1_024;
if (enabled) {
readOps = new MpscUnboundedArrayQueue<>(10_000);
writeOps = new MpscUnboundedArrayQueue<>(10_000);
scheduledTask =
executor.scheduleAtFixedRate(this::flush, maxDelayMillis, maxDelayMillis, TimeUnit.MILLISECONDS);
} else {
scheduledTask = null;
readOps = null;
writeOps = null;
}
// update synchronizer and register sync listener
updateMetadataEventSynchronizer(conf.getSynchronizer());
this.batchMetadataStoreStats =
new BatchMetadataStoreStats(metadataStoreName, executor);
}

    @Override
public void close() throws Exception {
if (enabled) {
// Fail all the pending items
MetadataStoreException ex =
new MetadataStoreException.AlreadyClosedException("Metadata store is getting closed");
readOps.drain(op -> op.getFuture().completeExceptionally(ex));
writeOps.drain(op -> op.getFuture().completeExceptionally(ex));
scheduledTask.cancel(true);
}
super.close();
this.batchMetadataStoreStats.close();
}
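
    /**
     * Drain the pending queues and execute the queued operations in batches. Read batches are
     * limited by operation count only, while write batches are additionally limited by the
     * accumulated payload size.
     */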
private void flush() {
while (!readOps.isEmpty()) {
            List<MetadataOp> ops = new ArrayList<>();
readOps.drain(ops::add, maxOperations);
internalBatchOperation(ops);
}
while (!writeOps.isEmpty()) {
int batchSize = 0;
            List<MetadataOp> ops = new ArrayList<>();
for (int i = 0; i < maxOperations; i++) {
MetadataOp op = writeOps.peek();
if (op == null) {
break;
}
if (i > 0 && (batchSize + op.size()) > maxSize) {
// We have already reached the max size, so flush the current batch
break;
}
batchSize += op.size();
ops.add(writeOps.poll());
}
internalBatchOperation(ops);
}
flushInProgress.set(false);
}
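
    // The store* overrides below only wrap the request into a MetadataOp and enqueue it;
    // the actual access to the underlying store is performed later by batchOperation().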
@Override
    public final CompletableFuture<Optional<GetResult>> storeGet(String path) {
OpGet op = new OpGet(path);
enqueue(readOps, op);
return op.getFuture();
}

    @Override
    protected final CompletableFuture<List<String>> getChildrenFromStore(String path) {
OpGetChildren op = new OpGetChildren(path);
enqueue(readOps, op);
return op.getFuture();
}

    @Override
    protected final CompletableFuture<Void> storeDelete(String path, Optional<Long> expectedVersion) {
OpDelete op = new OpDelete(path, expectedVersion);
enqueue(writeOps, op);
return op.getFuture();
}

    @Override
    protected CompletableFuture<Stat> storePut(String path, byte[] data, Optional<Long> optExpectedVersion,
                                               EnumSet<CreateOption> options) {
OpPut op = new OpPut(path, data, optExpectedVersion, options);
enqueue(writeOps, op);
return op.getFuture();
}

    @Override
    public Optional<MetadataEventSynchronizer> getMetadataEventSynchronizer() {
return Optional.ofNullable(synchronizer);
}

    @Override
public void updateMetadataEventSynchronizer(MetadataEventSynchronizer synchronizer) {
this.synchronizer = synchronizer;
registerSyncListener(Optional.ofNullable(synchronizer));
}
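
    /**
     * Enqueue an operation for batched execution. If the queue rejects the operation, or
     * batching is disabled, the operation is executed right away as a batch of one. A flush
     * is scheduled early once the queue exceeds the configured maximum number of operations.
     */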
    private void enqueue(MessagePassingQueue<MetadataOp> queue, MetadataOp op) {
if (enabled) {
if (!queue.offer(op)) {
// Execute individually if we're failing to enqueue
internalBatchOperation(Collections.singletonList(op));
return;
}
if (queue.size() > maxOperations && flushInProgress.compareAndSet(false, true)) {
executor.execute(this::flush);
}
} else {
internalBatchOperation(Collections.singletonList(op));
}
}
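
    // Record per-operation queueing latency and batch-size stats, then hand the batch to the
    // store-specific batchOperation() implementation.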
    private void internalBatchOperation(List<MetadataOp> ops) {
long now = System.currentTimeMillis();
for (MetadataOp op : ops) {
this.batchMetadataStoreStats.recordOpWaiting(now - op.created());
}
this.batchOperation(ops);
this.batchMetadataStoreStats.recordOpsInBatch(ops.size());
this.batchMetadataStoreStats.recordBatchExecuteTime(System.currentTimeMillis() - now);
}
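
    /**
     * Execute a batch of operations against the underlying store. Implementations are expected
     * to complete each operation's future, either successfully or exceptionally.
     */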
    protected abstract void batchOperation(List<MetadataOp> ops);
}