/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kafka.controller;

import org.apache.kafka.clients.admin.AlterConfigOp.OpType;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.ConfigResource.Type;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.common.metadata.ConfigRecord;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ApiError;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.metadata.KafkaConfigSchema;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.server.mutable.BoundedList;
import org.apache.kafka.server.policy.AlterConfigPolicy;
import org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata;
import org.apache.kafka.timeline.SnapshotRegistry;
import org.apache.kafka.timeline.TimelineHashMap;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;

import static org.apache.kafka.clients.admin.AlterConfigOp.OpType.APPEND;
import static org.apache.kafka.common.protocol.Errors.INVALID_CONFIG;
import static org.apache.kafka.controller.QuorumController.MAX_RECORDS_PER_USER_OP;

public class ConfigurationControlManager {
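/**
 * The default broker resource: an empty resource name selects the cluster-wide
 * default configuration rather than the configuration of one specific node.
 */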
public static final ConfigResource DEFAULT_NODE = new ConfigResource(Type.BROKER, "");
private final Logger log;
private final SnapshotRegistry snapshotRegistry;
private final KafkaConfigSchema configSchema;
private final Consumer<ConfigResource> existenceChecker;
private final Optional<AlterConfigPolicy> alterConfigPolicy;
private final ConfigurationValidator validator;
private final TimelineHashMap<ConfigResource, TimelineHashMap<String, String>> configData;
private final Map<String, Object> staticConfig;
private final ConfigResource currentController;
static class Builder {
private LogContext logContext = null;
private SnapshotRegistry snapshotRegistry = null;
private KafkaConfigSchema configSchema = KafkaConfigSchema.EMPTY;
private Consumer<ConfigResource> existenceChecker = __ -> { };
private Optional<AlterConfigPolicy> alterConfigPolicy = Optional.empty();
private ConfigurationValidator validator = ConfigurationValidator.NO_OP;
private Map<String, Object> staticConfig = Collections.emptyMap();
private int nodeId = 0;
Builder setLogContext(LogContext logContext) {
this.logContext = logContext;
return this;
}
Builder setSnapshotRegistry(SnapshotRegistry snapshotRegistry) {
this.snapshotRegistry = snapshotRegistry;
return this;
}
Builder setKafkaConfigSchema(KafkaConfigSchema configSchema) {
this.configSchema = configSchema;
return this;
}
Builder setExistenceChecker(Consumer<ConfigResource> existenceChecker) {
this.existenceChecker = existenceChecker;
return this;
}
Builder setAlterConfigPolicy(Optional<AlterConfigPolicy> alterConfigPolicy) {
this.alterConfigPolicy = alterConfigPolicy;
return this;
}
Builder setValidator(ConfigurationValidator validator) {
this.validator = validator;
return this;
}
Builder setStaticConfig(Map<String, Object> staticConfig) {
this.staticConfig = staticConfig;
return this;
}
Builder setNodeId(int nodeId) {
this.nodeId = nodeId;
return this;
}
ConfigurationControlManager build() {
if (logContext == null) logContext = new LogContext();
if (snapshotRegistry == null) snapshotRegistry = new SnapshotRegistry(logContext);
return new ConfigurationControlManager(
logContext,
snapshotRegistry,
configSchema,
existenceChecker,
alterConfigPolicy,
validator,
staticConfig,
nodeId);
}
}
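/*
 * Illustrative sketch (values below are placeholders): within this package the manager
 * is typically constructed through the Builder above. build() falls back to a fresh
 * LogContext and SnapshotRegistry when none are supplied, so a minimal construction
 * might look like:
 *
 *   ConfigurationControlManager manager = new ConfigurationControlManager.Builder()
 *       .setKafkaConfigSchema(KafkaConfigSchema.EMPTY)
 *       .setNodeId(1)
 *       .build();
 */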
private ConfigurationControlManager(LogContext logContext,
SnapshotRegistry snapshotRegistry,
KafkaConfigSchema configSchema,
Consumer<ConfigResource> existenceChecker,
Optional<AlterConfigPolicy> alterConfigPolicy,
ConfigurationValidator validator,
Map<String, Object> staticConfig,
int nodeId) {
this.log = logContext.logger(ConfigurationControlManager.class);
this.snapshotRegistry = snapshotRegistry;
this.configSchema = configSchema;
this.existenceChecker = existenceChecker;
this.alterConfigPolicy = alterConfigPolicy;
this.validator = validator;
this.configData = new TimelineHashMap<>(snapshotRegistry, 0);
this.staticConfig = Collections.unmodifiableMap(new HashMap<>(staticConfig));
this.currentController = new ConfigResource(Type.BROKER, Integer.toString(nodeId));
}
SnapshotRegistry snapshotRegistry() {
return snapshotRegistry;
}
/**
 * Determine the result of applying a batch of incremental configuration changes. Note
 * that this method does not change the contents of memory. It just generates a
 * result that you can apply later, if you wish, by calling replay().
 *
 * Note that there can only be one result per ConfigResource. So if you try to modify
 * several keys on a resource and one modification fails, the whole resource fails and
 * none of its configs get changed.
*
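 * For illustration only (a rough sketch; the exact generic types are abbreviated here):
 * <pre>{@code
 * Map<ConfigResource, Map<String, Entry<OpType, String>>> changes = new HashMap<>();
 * changes.put(new ConfigResource(Type.TOPIC, "my-topic"),
 *     Collections.singletonMap("cleanup.policy", Map.entry(OpType.SET, "compact")));
 * }</pre>
 *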
* @param configChanges Maps each resource to a map from config keys to
* operation data.
* @return The result.
*/
ControllerResult