

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.streams.kstream.internals;

import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.streams.kstream.ValueJoiner;
import org.apache.kafka.streams.processor.To;
import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl;
import org.apache.kafka.streams.state.ValueAndTimestamp;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor;
import static org.apache.kafka.streams.processor.internals.RecordQueue.UNKNOWN;
import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull;

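/**
 * Computes the left join of two {@code KTable}s. Every change on the left table triggers a
 * lookup into the right table for the same key, and the joiner is applied even when the
 * right side has no matching record (its value is then {@code null}). Records with a
 * {@code null} key cannot be joined and are dropped.
 *
 * <p>A minimal DSL-level sketch of a join that this class serves internally; the topic
 * names, value types, and joiner below are illustrative assumptions, not part of this file
 * (default serdes from the application config are assumed):
 * <pre>{@code
 * final StreamsBuilder builder = new StreamsBuilder();
 * final KTable<String, String> left = builder.table("left-topic");
 * final KTable<String, String> right = builder.table("right-topic");
 * // rightValue is null when no matching key exists in the right table
 * final KTable<String, String> joined =
 *     left.leftJoin(right, (leftValue, rightValue) -> leftValue + "/" + rightValue);
 * joined.toStream().to("joined-topic");
 * }</pre>
 */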
@SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated.
class KTableKTableLeftJoin<K, R, V1, V2> extends KTableKTableAbstractJoin<K, R, V1, V2> {
    private static final Logger LOG = LoggerFactory.getLogger(KTableKTableLeftJoin.class);

    KTableKTableLeftJoin(final KTableImpl<K, ?, V1> table1,
                         final KTableImpl<K, ?, V2> table2,
                         final ValueJoiner<? super V1, ? super V2, ? extends R> joiner) {
        super(table1, table2, joiner);
    }

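    // the returned processor consumes the left table's change stream and performs point
    // lookups into the right table through its value getter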
    @Override
    public org.apache.kafka.streams.processor.Processor<K, Change<V1>> get() {
        return new KTableKTableLeftJoinProcessor(valueGetterSupplier2.get());
    }

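    // exposes the join result as a value getter, so a downstream operator can compute
    // join results on demand instead of reading a materialized store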
    @Override
    public KTableValueGetterSupplier<K, R> view() {
        return new KTableKTableLeftJoinValueGetterSupplier(valueGetterSupplier1, valueGetterSupplier2);
    }

    private class KTableKTableLeftJoinValueGetterSupplier extends KTableKTableAbstractJoinValueGetterSupplier<K, R, V1, V2> {

        KTableKTableLeftJoinValueGetterSupplier(final KTableValueGetterSupplier<K, V1> valueGetterSupplier1,
                                                final KTableValueGetterSupplier<K, V2> valueGetterSupplier2) {
            super(valueGetterSupplier1, valueGetterSupplier2);
        }

        public KTableValueGetter<K, R> get() {
            return new KTableKTableLeftJoinValueGetter(valueGetterSupplier1.get(), valueGetterSupplier2.get());
        }
    }


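    // processes Change<V1> records from the left table: each update re-computes the join
    // against the current right-table value for the same key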
    private class KTableKTableLeftJoinProcessor extends org.apache.kafka.streams.processor.AbstractProcessor<K, Change<V1>> {

        private final KTableValueGetter<K, V2> valueGetter;
        private Sensor droppedRecordsSensor;

        KTableKTableLeftJoinProcessor(final KTableValueGetter<K, V2> valueGetter) {
            this.valueGetter = valueGetter;
        }

        @Override
        public void init(final org.apache.kafka.streams.processor.ProcessorContext context) {
            super.init(context);
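            // register the task-level sensor that counts records this processor drops
            // (used below for records with a null key)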
            droppedRecordsSensor = droppedRecordsSensor(
                Thread.currentThread().getName(),
                context.taskId().toString(),
                (StreamsMetricsImpl) context.metrics()
            );
            valueGetter.init(context);
        }

        @Override
        public void process(final K key, final Change<V1> change) {
            // we do join iff keys are equal, thus, if key is null we cannot join and just ignore the record
            if (key == null) {
                LOG.warn(
                    "Skipping record due to null key. change=[{}] topic=[{}] partition=[{}] offset=[{}]",
                    change, context().topic(), context().partition(), context().offset()
                );
                droppedRecordsSensor.record();
                return;
            }

            R newValue = null;
            final long resultTimestamp;
            R oldValue = null;

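            // look up the right-hand side for this key; with left-join semantics the
            // result is computed even if this lookup finds nothing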
            final ValueAndTimestamp<V2> valueAndTimestampRight = valueGetter.get(key);
            final V2 value2 = getValueOrNull(valueAndTimestampRight);
            final long timestampRight;

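            // if the right side is empty and the left change carries neither an old nor a
            // new value, nothing can change downstream, so the record is dropped; otherwise
            // the missing right-hand timestamp falls back to the UNKNOWN sentinel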
            if (value2 == null) {
                if (change.newValue == null && change.oldValue == null) {
                    return;
                }
                timestampRight = UNKNOWN;
            } else {
                timestampRight = valueAndTimestampRight.timestamp();
            }

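            // the result timestamp is the max of the triggering (left) record's timestamp
            // and the right-hand side's timestamp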
            resultTimestamp = Math.max(context().timestamp(), timestampRight);

            if (change.newValue != null) {
                newValue = joiner.apply(change.newValue, value2);
            }

            if (sendOldValues && change.oldValue != null) {
                oldValue = joiner.apply(change.oldValue, value2);
            }

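            // forward the new/old result pair; oldValue is only populated when a downstream
            // operator requested old values via sendOldValues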
            context().forward(key, new Change<>(newValue, oldValue), To.all().withTimestamp(resultTimestamp));
        }

        @Override
        public void close() {
            valueGetter.close();
        }
    }

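    // computes the join result lazily on lookup, for the case where this join feeds another
    // operator and its result is not materialized in a state store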
    private class KTableKTableLeftJoinValueGetter implements KTableValueGetter<K, R> {

        private final KTableValueGetter<K, V1> valueGetter1;
        private final KTableValueGetter<K, V2> valueGetter2;

        KTableKTableLeftJoinValueGetter(final KTableValueGetter<K, V1> valueGetter1,
                                        final KTableValueGetter<K, V2> valueGetter2) {
            this.valueGetter1 = valueGetter1;
            this.valueGetter2 = valueGetter2;
        }

        @Override
        public void init(final org.apache.kafka.streams.processor.ProcessorContext context) {
            valueGetter1.init(context);
            valueGetter2.init(context);
        }

        @Override
        public ValueAndTimestamp<R> get(final K key) {
            final ValueAndTimestamp<V1> valueAndTimestamp1 = valueGetter1.get(key);
            final V1 value1 = getValueOrNull(valueAndTimestamp1);

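            // left-join semantics: a result exists iff the left side has a value; the right
            // side may be missing, in which case value2 below is null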
            if (value1 != null) {
                final ValueAndTimestamp<V2> valueAndTimestamp2 = valueGetter2.get(key);
                final V2 value2 = getValueOrNull(valueAndTimestamp2);
                final long resultTimestamp;
                if (valueAndTimestamp2 == null) {
                    resultTimestamp = valueAndTimestamp1.timestamp();
                } else {
                    resultTimestamp = Math.max(valueAndTimestamp1.timestamp(), valueAndTimestamp2.timestamp());
                }
                return ValueAndTimestamp.make(joiner.apply(value1, value2), resultTimestamp);
            } else {
                return null;
            }
        }

        @Override
        public void close() {
            valueGetter1.close();
            valueGetter2.close();
        }
    }

}



