io.streamnative.pulsar.handlers.kop.format.KafkaV1EntryFormatter
Kafka on Pulsar implemented using Pulsar Protocol Handler
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.streamnative.pulsar.handlers.kop.format;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.streamnative.pulsar.handlers.kop.coordinator.group.TxnRecordsMetadata;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.mledger.Entry;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.pulsar.broker.service.plugin.EntryFilter;
import org.apache.pulsar.common.api.proto.MarkerType;
import org.apache.pulsar.common.api.proto.MessageMetadata;
import org.apache.pulsar.common.protocol.Commands;

/**
 * The entry formatter that uses Kafka's V1/V2 format.
 * This formatter does not validate all Kafka records,
 * so it is called the V1 entry formatter.
 */
@Slf4j
public class KafkaV1EntryFormatter extends AbstractEntryFormatter {

    protected KafkaV1EntryFormatter(List<EntryFilter> entryFilters) {
        super(entryFilters);
    }

    @Override
    public EncodeResult encode(final EncodeRequest encodeRequest) {
        final MemoryRecords records = encodeRequest.getRecords();
        final int numMessages = encodeRequest.getAppendInfo().numMessages();
        // Wrap the Kafka records buffer as-is; the record data is not copied or converted.
        final ByteBuf recordsWrapper = Unpooled.wrappedBuffer(records.buffer());

        MessageMetadata msgMetadata = getMessageMetadataWithNumberMessages(numMessages);
        attachTxnToMetadataIfExist(msgMetadata, records);

        // Frame the raw Kafka payload with Pulsar message metadata so it can be stored as a single entry.
        final ByteBuf buf = Commands.serializeMetadataAndPayload(
                Commands.ChecksumType.None,
                msgMetadata,
                recordsWrapper);
        recordsWrapper.release();
        return EncodeResult.get(records, buf, numMessages, 0, 0L);
    }

    @Override
    public DecodeResult decode(List<Entry> entries, byte magic) {
        return super.decode(entries, magic);
    }

    private static MessageMetadata getMessageMetadataWithNumberMessages(int numMessages) {
        final MessageMetadata metadata = new MessageMetadata();
        // Tag the entry with the Kafka entry-format identity property inherited from AbstractEntryFormatter,
        // so the decode path can recognize entries that already contain raw Kafka batches.
        metadata.addProperty()
                .setKey(IDENTITY_KEY)
                .setValue(IDENTITY_VALUE);
        metadata.setProducerName("");
        metadata.setSequenceId(0L);
        metadata.setPublishTime(System.currentTimeMillis());
        metadata.setNumMessagesInBatch(numMessages);
        return metadata;
    }

    static void attachTxnToMetadataIfExist(final MessageMetadata metadata, final MemoryRecords records) {
        RecordBatch recordBatch = records.firstBatch();
        if (recordBatch != null) {
            if (recordBatch.isControlBatch()) {
                // A control batch carries a single end-of-transaction marker record;
                // its key encodes the marker type (COMMIT or ABORT).
                Record record = recordBatch.iterator().next();
                ControlRecordType type = ControlRecordType.parse(record.key());
                TxnRecordsMetadata.attachToMetadata(metadata, recordBatch.producerId(), recordBatch.producerEpoch(),
                        type);
                // See AbstractBaseDispatcher#filterEntriesForConsumer: the marker is filtered out on the broker side.
                metadata.setMarkerType(MarkerType.TXN_COMMIT_VALUE).setTxnidMostBits(0L).setTxnidLeastBits(0L);
            } else if (recordBatch.isTransactional()) {
                TxnRecordsMetadata.attachToMetadata(metadata, recordBatch.producerId(),
                        recordBatch.producerEpoch(), ControlRecordType.UNKNOWN);
            }
        }
    }
}
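
For readers unfamiliar with the Kafka record API used above, the following standalone sketch reproduces the two branches of attachTxnToMetadataIfExist against a batch built with the stock kafka-clients library. It is a minimal illustration, not part of KoP: the class name ControlBatchProbe and the producer id, producer epoch, and coordinator epoch values are made-up examples, while MemoryRecords.withEndTransactionMarker, RecordBatch#isControlBatch/#isTransactional, and ControlRecordType.parse are standard kafka-clients APIs.

import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.EndTransactionMarker;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;

public class ControlBatchProbe {

    public static void main(String[] args) {
        // Build a COMMIT control batch the way Kafka's transaction coordinator does.
        // Producer id 1L, epoch 0, and coordinator epoch 0 are arbitrary example values.
        MemoryRecords records = MemoryRecords.withEndTransactionMarker(
                1L, (short) 0, new EndTransactionMarker(ControlRecordType.COMMIT, 0));

        RecordBatch batch = records.firstBatch();
        if (batch.isControlBatch()) {
            // A control batch holds a single marker record whose key encodes COMMIT or ABORT.
            Record marker = batch.iterator().next();
            System.out.println("marker type = " + ControlRecordType.parse(marker.key())
                    + ", producerId = " + batch.producerId()
                    + ", producerEpoch = " + batch.producerEpoch());
        } else if (batch.isTransactional()) {
            // An ordinary transactional data batch: only producer id/epoch need to be propagated.
            System.out.println("transactional batch from producer " + batch.producerId());
        }
    }
}

The encode path itself only frames the untouched Kafka batch bytes with a Pulsar MessageMetadata header via Commands.serializeMetadataAndPayload. The second sketch below is likewise a hypothetical example (EntryFramingProbe is not a KoP class and the payload string is arbitrary); it uses Pulsar's Commands.parseMessageMetadata to show that the header written by encode can be read back, leaving the raw payload bytes behind it.

import java.nio.charset.StandardCharsets;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.pulsar.common.api.proto.MessageMetadata;
import org.apache.pulsar.common.protocol.Commands;

public class EntryFramingProbe {

    public static void main(String[] args) {
        // Minimal metadata, mirroring getMessageMetadataWithNumberMessages (without the identity property).
        MessageMetadata metadata = new MessageMetadata();
        metadata.setProducerName("");
        metadata.setSequenceId(0L);
        metadata.setPublishTime(System.currentTimeMillis());
        metadata.setNumMessagesInBatch(1);

        // In KoP the payload would be the raw MemoryRecords buffer; a plain string stands in here.
        ByteBuf payload = Unpooled.wrappedBuffer("raw-kafka-batch".getBytes(StandardCharsets.UTF_8));
        ByteBuf entry = Commands.serializeMetadataAndPayload(Commands.ChecksumType.None, metadata, payload);

        // parseMessageMetadata consumes the metadata header; the remaining readable bytes are the payload.
        MessageMetadata parsed = Commands.parseMessageMetadata(entry);
        System.out.println("numMessagesInBatch = " + parsed.getNumMessagesInBatch()
                + ", payload bytes = " + entry.readableBytes());

        entry.release();
    }
}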