/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.runtime;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.ConfigKey;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigTransformer;
import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.NotFoundException;
import org.apache.kafka.connect.runtime.isolation.LoaderSwap;
import org.apache.kafka.connect.runtime.isolation.Plugins;
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos;
import org.apache.kafka.connect.runtime.rest.entities.ConfigKeyInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConfigValueInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffsets;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorType;
import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel;
import org.apache.kafka.connect.runtime.rest.entities.Message;
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
import org.apache.kafka.connect.sink.SinkConnector;
import org.apache.kafka.connect.source.SourceConnector;
import org.apache.kafka.connect.storage.ClusterConfigState;
import org.apache.kafka.connect.storage.ConfigBackingStore;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.ConverterConfig;
import org.apache.kafka.connect.storage.ConverterType;
import org.apache.kafka.connect.storage.HeaderConverter;
import org.apache.kafka.connect.storage.StatusBackingStore;
import org.apache.kafka.connect.transforms.Transformation;
import org.apache.kafka.connect.transforms.predicates.Predicate;
import org.apache.kafka.connect.util.Callback;
import org.apache.kafka.connect.util.ConnectorTaskId;
import org.apache.kafka.connect.util.Stage;
import org.apache.kafka.connect.util.TemporaryStage;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG;
import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG;
import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG;
/**
* Abstract Herder implementation which handles connector/task lifecycle tracking. Extensions
* must invoke the lifecycle hooks appropriately.
* <p>
* This class takes the following approach for sending status updates to the backing store:
*
* <ol>
* <li>
*    When the connector or task is starting, we overwrite the previous state blindly. This ensures that
*    every rebalance will reset the state of tasks to the proper state. The intuition is that there should
*    be less chance of write conflicts when the worker has just received its assignment and is starting tasks.
*    In particular, this prevents us from depending on the generation absolutely. If the group disappears
*    and the generation is reset, then we'll overwrite the status information from the older (and larger)
*    generation with the updated one. The danger of this approach is that slow-starting tasks may cause the
*    status to be overwritten after a rebalance has completed.
* </li>
* <li>
*    If the connector or task fails or is shut down, we use {@link StatusBackingStore#putSafe(ConnectorStatus)},
*    which provides a little more protection if the worker is no longer in the group (in which case the
*    task may have already been started on another worker). Obviously this is still racy. If the task has just
*    started on another worker, we may not have the updated status cached yet. In this case, we'll overwrite
*    the value, which will cause the state to be inconsistent (most likely until the next rebalance). Until
*    we have proper producer groups with fenced groups, there is not much else we can do.
* </li>
* </ol>
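* <p>
* A minimal sketch of the two write paths (the connector name is hypothetical; the calls
* mirror {@link #onStartup(String)} and {@link #onFailure(String, Throwable)} below):
* <pre>{@code
* // Starting: blind overwrite, so every rebalance resets the stored state.
* statusBackingStore.put(new ConnectorStatus("example-connector",
*         ConnectorStatus.State.RUNNING, workerId, generation()));
*
* // Failure: guarded write that is dropped if another worker has already
* // recorded a fresher status for the same connector.
* statusBackingStore.putSafe(new ConnectorStatus("example-connector",
*         ConnectorStatus.State.FAILED, trace(cause), workerId, generation()));
* }</pre>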
*/
public abstract class AbstractHerder implements Herder, TaskStatus.Listener, ConnectorStatus.Listener {
private static final Logger log = LoggerFactory.getLogger(AbstractHerder.class);
private final String workerId;
protected final Worker worker;
private final String kafkaClusterId;
protected final StatusBackingStore statusBackingStore;
protected final ConfigBackingStore configBackingStore;
private volatile boolean ready = false;
private final ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy;
private final ExecutorService connectorExecutor;
private final Time time;
protected final Loggers loggers;
private final ConcurrentMap<String, Connector> tempConnectors = new ConcurrentHashMap<>();
public AbstractHerder(Worker worker,
String workerId,
String kafkaClusterId,
StatusBackingStore statusBackingStore,
ConfigBackingStore configBackingStore,
ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy,
Time time) {
this.worker = worker;
this.worker.herder = this;
this.workerId = workerId;
this.kafkaClusterId = kafkaClusterId;
this.statusBackingStore = statusBackingStore;
this.configBackingStore = configBackingStore;
this.connectorClientConfigOverridePolicy = connectorClientConfigOverridePolicy;
this.connectorExecutor = Executors.newCachedThreadPool();
this.time = time;
this.loggers = new Loggers(time);
}
@Override
public String kafkaClusterId() {
return kafkaClusterId;
}
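/**
* Returns the current rebalance generation of this worker. The generation is recorded with
* every status write so that writes from an earlier generation can be recognized as stale.
*/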
protected abstract int generation();
protected void startServices() {
this.worker.start();
this.statusBackingStore.start();
this.configBackingStore.start();
}
protected void stopServices() {
this.statusBackingStore.stop();
this.configBackingStore.stop();
this.worker.stop();
this.connectorExecutor.shutdown();
Utils.closeQuietly(this.connectorClientConfigOverridePolicy, "connector client config override policy");
}
protected void ready() {
this.ready = true;
}
@Override
public boolean isReady() {
return ready;
}
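// Connector lifecycle hooks. As described in the class Javadoc, "starting" transitions
// use put() (blind overwrite) while failure/shutdown transitions use putSafe() (guarded).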
@Override
public void onStartup(String connector) {
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RUNNING,
workerId, generation()));
}
@Override
public void onStop(String connector) {
statusBackingStore.put(new ConnectorStatus(connector, AbstractStatus.State.STOPPED,
workerId, generation()));
}
@Override
public void onPause(String connector) {
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.PAUSED,
workerId, generation()));
}
@Override
public void onResume(String connector) {
statusBackingStore.put(new ConnectorStatus(connector, TaskStatus.State.RUNNING,
workerId, generation()));
}
@Override
public void onShutdown(String connector) {
statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.UNASSIGNED,
workerId, generation()));
}
@Override
public void onFailure(String connector, Throwable cause) {
statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.FAILED,
trace(cause), workerId, generation()));
}
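// Task lifecycle hooks follow the same put()/putSafe() split as the connector hooks above.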
@Override
public void onStartup(ConnectorTaskId id) {
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation()));
}
@Override
public void onFailure(ConnectorTaskId id, Throwable cause) {
statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause)));
}
@Override
public void onShutdown(ConnectorTaskId id) {
statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation()));
}
@Override
public void onResume(ConnectorTaskId id) {
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation()));
}
@Override
public void onPause(ConnectorTaskId id) {
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation()));
}
@Override
public void onDeletion(String connector) {
for (TaskStatus status : statusBackingStore.getAll(connector))
onDeletion(status.id());
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation()));
}
@Override
public void onDeletion(ConnectorTaskId id) {
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.DESTROYED, workerId, generation()));
}
public void onRestart(String connector) {
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RESTARTING,
workerId, generation()));
}
public void onRestart(ConnectorTaskId id) {
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RESTARTING, workerId, generation()));
}
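// Pause/resume requests only record the desired target state in the config backing store;
// the resulting status transitions are reported asynchronously through the on*() hooks above.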
@Override
public void pauseConnector(String connector) {
if (!configBackingStore.contains(connector))
throw new NotFoundException("Unknown connector " + connector);
configBackingStore.putTargetState(connector, TargetState.PAUSED);
}
@Override
public void resumeConnector(String connector) {
if (!configBackingStore.contains(connector))
throw new NotFoundException("Unknown connector " + connector);
configBackingStore.putTargetState(connector, TargetState.STARTED);
}
@Override
public Plugins plugins() {
return worker.getPlugins();
}
/**
* Retrieves the raw config map by connector name.
*/
protected abstract Map<String, String> rawConfig(String connName);
@Override
public void connectorConfig(String connName, Callback<Map<String, String>> callback) {