org.apache.jena.kafka.cmd.FK_DumpTopic2

/*
 *  Copyright (c) Telicent Ltd.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

package org.apache.jena.kafka.cmd;

import java.io.PrintStream;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.jena.atlas.logging.LogCtl;
import org.apache.jena.kafka.KConnectorDesc;
import org.apache.jena.kafka.KafkaConnectorAssembler;
import org.apache.jena.kafka.common.DataState;
import org.apache.jena.kafka.common.DeserializerDump;
import org.apache.jena.sparql.core.assembler.AssemblerUtils;
import org.apache.jena.sys.JenaSystem;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;


// Old(er) code - uses connector.ttl
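/**
 * Dump the contents of the Kafka topic named in a connector assembler description.
 * The description is built from {@code FK_Defaults.connectorFile}; partition 0 of the
 * topic is read from the beginning and each record value is printed to stdout with its offset.
 */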
public class FK_DumpTopic2 {

    static {
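        // Initialize logging and the Jena system before anything else runs.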
        LogCtl.setLog4j2();
        JenaSystem.init();
    }

    public static void main(String... args) {
        // No arguments - the connector description is read from FK_Defaults.connectorFile.

        // Register the connector assembler, then build the connector description from the assembler file.
        AssemblerUtils.registerAssembler(null, KafkaConnectorAssembler.getType(), new KafkaConnectorAssembler());
        KConnectorDesc conn = (KConnectorDesc)AssemblerUtils.build(FK_Defaults.connectorFile, KafkaConnectorAssembler.getType());

        if ( conn == null ) {
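            // The assembler did not produce a connector description - nothing to do.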
            System.err.flush();
            System.out.println();
            System.out.println("FAILED");
            return;
        }

        String topic = conn.getTopic();

        // Client-side state management.
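        // Ephemeral state is held in memory only; it starts with no recorded offset.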
        DataState dState = DataState.createEphemeral(conn.getTopic());

        // -- Props
        Properties cProps = conn.getKafkaConsumerProps();
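        // Keys are read as plain strings; values are deserialized by DeserializerDump into a displayable form.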
        StringDeserializer strDeser = new StringDeserializer();
        DeserializerDump deSer = new DeserializerDump();
        Consumer consumer = new KafkaConsumer<>(cProps, strDeser, deSer);
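        // This tool reads only partition 0 of the topic.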
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        consumer.assign(List.of(topicPartition));

        // Resume.
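        // With ephemeral state there is no recorded offset, so reading starts at the beginning of the partition.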
        long initialOffset = dState.getLastOffset();
        if ( initialOffset < 0 )
            consumer.seekToBeginning(List.of(topicPartition));
        else {
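            // A recorded offset would mean resuming part-way through the topic - not handled by this tool.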
            System.err.println("Should be replay");
        }

        // Keep polling until a poll returns nothing new.
        for ( ;; ) {
            boolean somethingReceived = receiver(consumer, dState);
            if ( ! somethingReceived )
                break;
        }

        consumer.close();
        System.exit(0);
    }

    // Once round the polling loop. Returns true if the poll advanced the offset.
    private static boolean receiver(Consumer consumer, DataState dState) {
        final long lastOffsetState = dState.getLastOffset();
        long newOffset = receiverStep(lastOffsetState, consumer);
        if ( newOffset == lastOffsetState )
            return false;
        dState.setLastOffset(newOffset);
        return true;
    }

    // True once any record has been printed; used to put blank lines between record dumps.
    private final static AtomicBoolean seenFirst = new AtomicBoolean(false);
    private final static PrintStream output = System.out;

    // Do the Kafka-poll/wait.
    private static long receiverStep(final long lastOffsetState, Consumer consumer) {
        ConsumerRecords cRec = consumer.poll(Duration.ofMillis(5000));
        long lastOffset = lastOffsetState;
        int count = cRec.count();
        // If an earlier batch produced output, print a blank line to separate the batches.
        if ( count > 0 && seenFirst.get() )
            output.println();
        if ( count > 0 )
            seenFirst.set(true);

        // Print a blank line between records within this batch.
        boolean seenFirstInBatch = false;
        for ( ConsumerRecord rec : cRec ) {
            if ( seenFirstInBatch )
                output.println();
            else
                seenFirstInBatch = true;
            long offset = rec.offset();
            output.printf("==--== Offset: %d ==--------==\n", offset);
            output.print(rec.value());
            if ( offset != lastOffset+1 )
                output.printf("WARNING: Inconsistent offsets: offset=%d, lastOffset = %d\n", offset, lastOffset);
            lastOffset = offset;
        }

        return lastOffset;
    }
}



