/*
*
* * Copyright 2024 Responsive Computing, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package dev.responsive.kafka.internal.db;

import com.mongodb.MongoBulkWriteException;
import com.mongodb.MongoException;
import com.mongodb.bulk.WriteConcernError;
import com.mongodb.client.MongoCollection;
import dev.responsive.kafka.internal.db.mongo.KVDoc;
import dev.responsive.kafka.internal.db.mongo.MongoKVTable;
import dev.responsive.kafka.internal.db.mongo.MongoWriter;
import dev.responsive.kafka.internal.db.partitioning.TablePartitioner;
import dev.responsive.kafka.internal.stores.RemoteWriteResult;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;
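
/**
 * Flush manager for a MongoDB-backed key-value table. One instance is created per Kafka
 * partition; it hands out {@link MongoWriter}s for the table partitions that make up a
 * flush and records the last consumed offset once a flush completes.
 *
 * <p>A rough usage sketch (the surrounding store and partitioner wiring is illustrative,
 * not part of this class):
 * <pre>{@code
 *   final MongoKVFlushManager flushManager =
 *       new MongoKVFlushManager(table, kvDocs, kafkaPartition, partitioner);
 *   final RemoteWriter<Bytes, Integer> writer = flushManager.createWriter(tablePartition);
 *   // ...buffer the batch's writes through the writer and flush it...
 *   flushManager.updateOffset(consumedOffset);
 * }</pre>
 */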
public class MongoKVFlushManager extends KVFlushManager {
private final String logPrefix;
private final Logger log;
private final MongoKVTable table;
private final MongoCollection<KVDoc> kvDocs;
private final TablePartitioner<Bytes, Integer> partitioner;
private final int kafkaPartition;
public MongoKVFlushManager(
final MongoKVTable table,
final MongoCollection<KVDoc> kvDocs,
final int kafkaPartition,
final TablePartitioner<Bytes, Integer> partitioner
) {
this.table = table;
this.kvDocs = kvDocs;
this.kafkaPartition = kafkaPartition;
this.partitioner = partitioner;
logPrefix = String.format("%s[%d] kv-store {epoch=%d} ",
table.name(), kafkaPartition, table.localEpoch(kafkaPartition));
log = new LogContext(logPrefix).logger(MongoKVFlushManager.class);
}
@Override
public String tableName() {
return table.name();
}
@Override
public TablePartitioner<Bytes, Integer> partitioner() {
return partitioner;
}
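
/**
 * Creates a writer that buffers this batch's writes for the given table partition and
 * applies them to the backing {@code kvDocs} collection via {@link MongoWriter}.
 */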
@Override
public RemoteWriter<Bytes, Integer> createWriter(final Integer tablePartition) {
return new MongoWriter<>(table, kafkaPartition, tablePartition, () -> kvDocs);
}
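
/**
 * Summarizes the state relevant to a failed flush: the offset of the failed batch versus
 * the offset persisted remotely, and the local epoch versus the epoch persisted remotely
 * (a newer remote epoch typically indicates this writer has been fenced).
 */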
@Override
public String failedFlushInfo(final long batchOffset, final Integer failedTablePartition) {
return String.format(", ",
batchOffset, table.fetchOffset(kafkaPartition),
table.localEpoch(kafkaPartition), table.fetchEpoch(kafkaPartition));
}
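
/**
 * Persists the latest consumed offset for this Kafka partition. MongoDB write errors are
 * logged and surfaced as a failed {@link RemoteWriteResult}; any other
 * {@link MongoException} is treated as fatal and rethrown.
 */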
@Override
public RemoteWriteResult<Integer> updateOffset(final long consumedOffset) {
try {
// TODO: should we check result.wasAcknowledged()/use a write concern?
table.setOffset(kafkaPartition, consumedOffset);
} catch (final MongoBulkWriteException e) {
log.warn("Bulk write operation failed", e);
final WriteConcernError writeConcernError = e.getWriteConcernError();
if (writeConcernError != null) {
log.warn("Bulk write operation failed due to write concern error {}", writeConcernError);
} else {
log.warn("Bulk write operation failed due to error(s): {}", e.getWriteErrors());
}
return RemoteWriteResult.failure(kafkaPartition);
} catch (final MongoException e) {
log.error("Unexpected exception running the bulk write operation", e);
throw new RuntimeException("Bulk write operation failed", e);
}
return RemoteWriteResult.success(kafkaPartition);
}
@Override
public String logPrefix() {
return logPrefix;
}
}