io.streamnative.pulsar.handlers.kop.format.KafkaV1EntryFormatter Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of pulsar-protocol-handler-kafka Show documentation
Kafka on Pulsar implemented using Pulsar Protocol Handler
/**
* Copyright (c) 2019 - 2024 StreamNative, Inc.. All Rights Reserved.
*/
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.streamnative.pulsar.handlers.kop.format;
import static io.streamnative.pulsar.handlers.kop.utils.KopLogValidator.validateKey;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.streamnative.pulsar.handlers.kop.coordinator.group.TxnRecordsMetadata;
import io.streamnative.pulsar.handlers.kop.storage.PartitionLog;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.mledger.Entry;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.pulsar.broker.service.plugin.EntryFilter;
import org.apache.pulsar.common.api.proto.MarkerType;
import org.apache.pulsar.common.api.proto.MessageMetadata;
import org.apache.pulsar.common.protocol.Commands;
/**
 * The entry formatter that uses Kafka's V1/V2 format.
 * This formatter doesn't validate every Kafka record,
 * hence it is called the V1 entry formatter.
 */
@Slf4j
public class KafkaV1EntryFormatter extends AbstractEntryFormatter {

    /**
     * Creates a formatter that forwards the broker-side entry filters to the base formatter.
     *
     * @param entryFilters the {@link EntryFilter}s applied when decoding entries for consumers
     */
    protected KafkaV1EntryFormatter(List<EntryFilter> entryFilters) {
        super(entryFilters);
    }

    /**
     * Encodes Kafka records into a Pulsar entry by wrapping the records buffer directly
     * as the payload (zero-copy, no per-record re-encoding). Only compacted topics get
     * per-record key validation; all other validation is skipped by this formatter.
     *
     * @param encodeRequest carries the Kafka {@link MemoryRecords} and the append metadata
     * @return the result holding the serialized metadata-plus-payload buffer and message count
     */
    @Override
    public EncodeResult encode(final EncodeRequest encodeRequest) {
        final MemoryRecords records = encodeRequest.getRecords();
        final PartitionLog.LogAppendInfo appendInfo = encodeRequest.getAppendInfo();
        final int numMessages = appendInfo.numMessages();

        // Compacted topics require every record to carry a non-null key.
        if (appendInfo.compactedTopic()) {
            for (MutableRecordBatch batch : records.batches()) {
                for (Record record : batch) {
                    validateKey(record, true);
                }
            }
        }

        // Wrap the Kafka buffer without copying; it becomes the Pulsar entry payload.
        final ByteBuf recordsWrapper = Unpooled.wrappedBuffer(records.buffer());
        MessageMetadata msgMetadata = getMessageMetadataWithNumberMessages(appendInfo);
        attachTxnToMetadataIfExist(msgMetadata, records);
        final ByteBuf buf = Commands.serializeMetadataAndPayload(
                Commands.ChecksumType.None,
                msgMetadata,
                recordsWrapper);
        // NOTE(review): releasing the wrapper right after serialization assumes
        // serializeMetadataAndPayload does not keep a reference to it — preserved
        // from the original; confirm against the Pulsar Commands implementation.
        recordsWrapper.release();
        return EncodeResult.get(records, buf, numMessages, 0, 0L);
    }

    /**
     * Delegates to the base formatter's decode, which applies the configured entry filters.
     *
     * @param entries the managed-ledger entries to decode
     * @param magic the Kafka record-batch magic value requested by the consumer
     * @return the decoded records wrapped in a {@link DecodeResult}
     */
    @Override
    public DecodeResult decode(List<Entry> entries, byte magic) {
        return super.decode(entries, magic);
    }

    /**
     * Copies transactional attributes from the first record batch (if any) onto the
     * Pulsar message metadata. Control batches are additionally tagged as TXN markers
     * so the broker filters them out before delivery (see
     * AbstractBaseDispatcher#filterEntriesForConsumer).
     *
     * @param metadata the Pulsar message metadata to enrich
     * @param records the Kafka records whose first batch is inspected
     */
    static void attachTxnToMetadataIfExist(final MessageMetadata metadata, final MemoryRecords records) {
        RecordBatch recordBatch = records.firstBatch();
        if (recordBatch != null) {
            if (recordBatch.isControlBatch()) {
                // NOTE(review): assumes a control batch always contains at least one
                // record; iterator().next() would throw otherwise — confirm upstream.
                Record record = recordBatch.iterator().next();
                ControlRecordType type = ControlRecordType.parse(record.key());
                TxnRecordsMetadata.attachToMetadata(metadata, recordBatch.producerId(), recordBatch.producerEpoch(),
                        type);
                // Mark as a TXN marker so the broker-side dispatcher filters it out.
                metadata.setMarkerType(MarkerType.TXN_COMMIT_VALUE).setTxnidMostBits(0L).setTxnidLeastBits(0L);
            } else if (recordBatch.isTransactional()) {
                TxnRecordsMetadata.attachToMetadata(metadata, recordBatch.producerId(),
                        recordBatch.producerEpoch(), ControlRecordType.UNKNOWN);
            }
        }
    }
}