/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.plugin.kafka;

import com.google.common.collect.ImmutableList;
import io.airlift.log.Logger;
import io.airlift.slice.Slice;
import io.prestosql.decoder.DecoderColumnHandle;
import io.prestosql.decoder.FieldValueProvider;
import io.prestosql.decoder.RowDecoder;
import io.prestosql.spi.PrestoException;
import io.prestosql.spi.block.Block;
import io.prestosql.spi.connector.ColumnHandle;
import io.prestosql.spi.connector.RecordCursor;
import io.prestosql.spi.connector.RecordSet;
import io.prestosql.spi.type.Type;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;

import static com.google.common.base.Preconditions.checkArgument;
import static io.prestosql.decoder.FieldValueProviders.booleanValueProvider;
import static io.prestosql.decoder.FieldValueProviders.bytesValueProvider;
import static io.prestosql.decoder.FieldValueProviders.longValueProvider;
import static io.prestosql.plugin.kafka.KafkaErrorCode.KAFKA_SPLIT_ERROR;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;

/**
 * Kafka-specific record set. Returns a cursor for a topic that iterates over a Kafka partition segment.
 */
public class KafkaRecordSet
        implements RecordSet
{
    private static final Logger log = Logger.get(KafkaRecordSet.class);

    private static final int KAFKA_READ_BUFFER_SIZE = 100_000;
    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];

    private final KafkaSplit split;
    private final KafkaSimpleConsumerManager consumerManager;

    private final RowDecoder keyDecoder;
    private final RowDecoder messageDecoder;

    private final List<DecoderColumnHandle> columnHandles;
    private final List<Type> columnTypes;
    KafkaRecordSet(KafkaSplit split,
            KafkaSimpleConsumerManager consumerManager,
            List<DecoderColumnHandle> columnHandles,
            RowDecoder keyDecoder,
            RowDecoder messageDecoder)
    {
        this.split = requireNonNull(split, "split is null");
        this.consumerManager = requireNonNull(consumerManager, "consumerManager is null");
        this.keyDecoder = requireNonNull(keyDecoder, "keyDecoder is null");
        this.messageDecoder = requireNonNull(messageDecoder, "messageDecoder is null");
        this.columnHandles = requireNonNull(columnHandles, "columnHandles is null");

        ImmutableList.Builder<Type> typeBuilder = ImmutableList.builder();
        for (DecoderColumnHandle handle : columnHandles) {
            typeBuilder.add(handle.getType());
        }
        this.columnTypes = typeBuilder.build();
    }
    @Override
    public List<Type> getColumnTypes()
    {
        return columnTypes;
    }

    @Override
    public RecordCursor cursor()
    {
        return new KafkaRecordCursor();
    }
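
    /**
     * Cursor over a single partition segment. Messages are fetched lazily in
     * KAFKA_READ_BUFFER_SIZE chunks and decoded one at a time as the cursor advances.
     */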
    public class KafkaRecordCursor
            implements RecordCursor
    {
        private long totalBytes;
        private long totalMessages;
        private long cursorOffset = split.getStart();
        private Iterator<MessageAndOffset> messageAndOffsetIterator;
        private final AtomicBoolean reported = new AtomicBoolean();

        private final FieldValueProvider[] currentRowValues = new FieldValueProvider[columnHandles.size()];

        KafkaRecordCursor()
        {
        }
        @Override
        public long getCompletedBytes()
        {
            return totalBytes;
        }

        @Override
        public long getReadTimeNanos()
        {
            return 0;
        }

        @Override
        public Type getType(int field)
        {
            checkArgument(field < columnHandles.size(), "Invalid field index");
            return columnHandles.get(field).getType();
        }
        @Override
        public boolean advanceNextPosition()
        {
            while (true) {
                if (cursorOffset >= split.getEnd()) {
                    return endOfData(); // Split end is exclusive.
                }
                // Open a fetch request if no message iterator is currently active.
                openFetchRequest();

                while (messageAndOffsetIterator.hasNext()) {
                    MessageAndOffset currentMessageAndOffset = messageAndOffsetIterator.next();
                    long messageOffset = currentMessageAndOffset.offset();

                    if (messageOffset >= split.getEnd()) {
                        return endOfData(); // Past our split end. Bail.
                    }

                    if (messageOffset >= cursorOffset) {
                        return nextRow(currentMessageAndOffset);
                    }
                }
                messageAndOffsetIterator = null;
            }
        }
        private boolean endOfData()
        {
            if (!reported.getAndSet(true)) {
                log.debug("Found a total of %d messages with %d bytes (%d messages expected). Last Offset: %d (%d, %d)",
                        totalMessages, totalBytes, split.getEnd() - split.getStart(),
                        cursorOffset, split.getStart(), split.getEnd());
            }
            return false;
        }
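
        // Materializes one row: copies the message key and payload out of the Kafka
        // buffers, then hands both to the configured RowDecoders to produce the
        // per-column value providers.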
        private boolean nextRow(MessageAndOffset messageAndOffset)
        {
            cursorOffset = messageAndOffset.offset() + 1; // Cursor now points to the next message.
            totalBytes += messageAndOffset.message().payloadSize();
            totalMessages++;

            byte[] keyData = EMPTY_BYTE_ARRAY;
            byte[] messageData = EMPTY_BYTE_ARRAY;

            ByteBuffer key = messageAndOffset.message().key();
            if (key != null) {
                keyData = new byte[key.remaining()];
                key.get(keyData);
            }

            ByteBuffer message = messageAndOffset.message().payload();
            if (message != null) {
                messageData = new byte[message.remaining()];
                message.get(messageData);
            }

            Map<ColumnHandle, FieldValueProvider> currentRowValuesMap = new HashMap<>();

            // The source listing is truncated from the following statement onward. The rest
            // of this method is reconstructed from the io.prestosql.decoder API and the
            // static value-provider imports above; details may differ from the released source.
            Optional<Map<DecoderColumnHandle, FieldValueProvider>> decodedKey = keyDecoder.decodeRow(keyData, null);
            Optional<Map<DecoderColumnHandle, FieldValueProvider>> decodedValue = messageDecoder.decodeRow(messageData, null);

            for (DecoderColumnHandle columnHandle : columnHandles) {
                if (columnHandle.isInternal()) {
                    KafkaInternalFieldDescription fieldDescription = KafkaInternalFieldDescription.forColumnName(columnHandle.getName());
                    switch (fieldDescription) {
                        case SEGMENT_COUNT_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(totalMessages));
                            break;
                        case SEGMENT_START_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(split.getStart()));
                            break;
                        case SEGMENT_END_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(split.getEnd()));
                            break;
                        case PARTITION_OFFSET_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(messageAndOffset.offset()));
                            break;
                        case MESSAGE_FIELD:
                            currentRowValuesMap.put(columnHandle, bytesValueProvider(messageData));
                            break;
                        case MESSAGE_LENGTH_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(messageData.length));
                            break;
                        case KEY_FIELD:
                            currentRowValuesMap.put(columnHandle, bytesValueProvider(keyData));
                            break;
                        case KEY_LENGTH_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(keyData.length));
                            break;
                        case KEY_CORRUPT_FIELD:
                            currentRowValuesMap.put(columnHandle, booleanValueProvider(!decodedKey.isPresent()));
                            break;
                        case MESSAGE_CORRUPT_FIELD:
                            currentRowValuesMap.put(columnHandle, booleanValueProvider(!decodedValue.isPresent()));
                            break;
                        case PARTITION_ID_FIELD:
                            currentRowValuesMap.put(columnHandle, longValueProvider(split.getPartitionId()));
                            break;
                        default:
                            throw new IllegalArgumentException("unknown internal field " + fieldDescription);
                    }
                }
            }

            decodedKey.ifPresent(currentRowValuesMap::putAll);
            decodedValue.ifPresent(currentRowValuesMap::putAll);

            for (int i = 0; i < columnHandles.size(); i++) {
                ColumnHandle columnHandle = columnHandles.get(i);
                currentRowValues[i] = currentRowValuesMap.get(columnHandle);
            }

            return true;
        }
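
        // End of the truncated listing. The accessors and fetch logic below are a
        // reconstruction based on the RecordCursor/FieldValueProvider contracts and the
        // imports above (Slice, Block, SimpleConsumer, FetchRequestBuilder, KAFKA_SPLIT_ERROR).
        // The KafkaSplit accessors (getTopicName, getPartitionId, getLeader) are assumed
        // from the legacy connector; details may differ from the released source.
        @Override
        public boolean getBoolean(int field)
        {
            return getFieldValueProvider(field, boolean.class).getBoolean();
        }

        @Override
        public long getLong(int field)
        {
            return getFieldValueProvider(field, long.class).getLong();
        }

        @Override
        public double getDouble(int field)
        {
            return getFieldValueProvider(field, double.class).getDouble();
        }

        @Override
        public Slice getSlice(int field)
        {
            return getFieldValueProvider(field, Slice.class).getSlice();
        }

        @Override
        public Object getObject(int field)
        {
            return getFieldValueProvider(field, Block.class).getBlock();
        }

        @Override
        public boolean isNull(int field)
        {
            checkArgument(field < columnHandles.size(), "Invalid field index");
            return currentRowValues[field] == null || currentRowValues[field].isNull();
        }

        private FieldValueProvider getFieldValueProvider(int field, Class<?> expectedType)
        {
            checkArgument(field < columnHandles.size(), "Invalid field index");
            checkFieldType(field, expectedType);
            return currentRowValues[field];
        }

        private void checkFieldType(int field, Class<?> expected)
        {
            Class<?> actual = getType(field).getJavaType();
            checkArgument(actual == expected, "Expected field %s to be type %s but is %s", field, expected, actual);
        }

        @Override
        public void close()
        {
        }

        private void openFetchRequest()
        {
            try {
                if (messageAndOffsetIterator == null) {
                    log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages);
                    FetchRequest req = new FetchRequestBuilder()
                            .clientId("presto-worker-" + Thread.currentThread().getName())
                            .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
                            .build();

                    SimpleConsumer consumer = consumerManager.getConsumer(split.getLeader());

                    FetchResponse fetchResponse = consumer.fetch(req);
                    if (fetchResponse.hasError()) {
                        short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
                        log.warn("Fetch response has error: %d", errorCode);
                        throw new PrestoException(KAFKA_SPLIT_ERROR, format("Could not fetch data from Kafka, error code is '%s'", errorCode));
                    }

                    messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
                }
            }
            catch (Exception e) {
                // Catch Exception broadly: the Kafka client is written in Scala and does
                // not declare checked exceptions in its method signatures.
                if (e instanceof PrestoException) {
                    throw (PrestoException) e;
                }
                throw new PrestoException(KAFKA_SPLIT_ERROR, format("Cannot read data from topic '%s', partition '%s': %s", split.getTopicName(), split.getPartitionId(), e.getMessage()), e);
            }
        }
    }
}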