// org.apache.kafka.streams.processor.internals.GlobalStateUpdateTask
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.processor.internals;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.errors.StreamsException;
import org.apache.kafka.streams.processor.api.Record;
import org.slf4j.Logger;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensorOrSkippedRecordsSensor;
/**
* Updates the state for all Global State Stores.
*/
public class GlobalStateUpdateTask implements GlobalStateMaintainer {
private final Logger log;
private final LogContext logContext;
private final ProcessorTopology topology;
private final InternalProcessorContext processorContext;
// Highest applied offset per changelog partition; presumably advanced as records
// are applied in update() — TODO confirm against the (truncated) update() body.
private final Map<TopicPartition, Long> offsets = new HashMap<>();
// One deserializer per global source (changelog) topic, populated in initialize().
private final Map<String, RecordDeserializer> deserializers = new HashMap<>();
private final GlobalStateManager stateMgr;
private final DeserializationExceptionHandler deserializationExceptionHandler;
/**
 * Creates a task that applies records from global changelog topics to the
 * corresponding global state stores.
 *
 * @param logContext                      source of this task's logger
 * @param topology                        the global processor topology
 * @param processorContext                context handed to the topology's processors
 * @param stateMgr                        manager for the global state stores
 * @param deserializationExceptionHandler handler invoked when a record cannot be deserialized
 */
public GlobalStateUpdateTask(final LogContext logContext,
                             final ProcessorTopology topology,
                             final InternalProcessorContext processorContext,
                             final GlobalStateManager stateMgr,
                             final DeserializationExceptionHandler deserializationExceptionHandler) {
    this.log = logContext.logger(getClass());
    this.logContext = logContext;
    this.topology = topology;
    this.processorContext = processorContext;
    this.stateMgr = stateMgr;
    this.deserializationExceptionHandler = deserializationExceptionHandler;
}
/**
 * Initializes all global state stores and builds a {@link RecordDeserializer}
 * for each global source (changelog) topic, keyed by topic name.
 *
 * @return the checkpointed offset for each changelog partition, as reported by
 *         the state manager, from which consumption should resume
 * @throws IllegalStateException If store gets registered after initialized is already finished
 * @throws StreamsException If the store's change log does not contain the partition
 */
@Override
public Map<TopicPartition, Long> initialize() {
    final Set<String> storeNames = stateMgr.initialize();
    final Map<String, String> storeNameToTopic = topology.storeToChangelogTopic();
    for (final String storeName : storeNames) {
        final String sourceTopic = storeNameToTopic.get(storeName);
        // The source node for the changelog topic supplies the key/value deserializers.
        final SourceNode<?, ?> source = topology.source(sourceTopic);
        deserializers.put(
            sourceTopic,
            new RecordDeserializer(
                source,
                deserializationExceptionHandler,
                logContext,
                // Records dropped on deserialization errors are counted on this sensor.
                droppedRecordsSensorOrSkippedRecordsSensor(
                    Thread.currentThread().getName(),
                    processorContext.taskId().toString(),
                    processorContext.metrics()
                )
            )
        );
    }
    initTopology();
    processorContext.initialize();
    return stateMgr.changelogOffsets();
}
@SuppressWarnings("unchecked")
@Override
public void update(final ConsumerRecord record) {
final RecordDeserializer sourceNodeAndDeserializer = deserializers.get(record.topic());
final ConsumerRecord