
/**
* Copyright (c) 2013-2024 Nikita Koksharov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.redisson.connection;
import io.netty.buffer.ByteBuf;
import io.netty.util.Timeout;
import org.redisson.api.NodeType;
import org.redisson.api.RFuture;
import org.redisson.client.*;
import org.redisson.client.codec.StringCodec;
import org.redisson.client.handler.CRC16;
import org.redisson.client.protocol.RedisCommands;
import org.redisson.client.protocol.RedisStrictCommand;
import org.redisson.client.protocol.decoder.ClusterNodesDecoder;
import org.redisson.client.protocol.decoder.ObjectDecoder;
import org.redisson.cluster.ClusterNodeInfo;
import org.redisson.cluster.ClusterNodeInfo.Flag;
import org.redisson.cluster.ClusterPartition;
import org.redisson.cluster.ClusterPartition.Type;
import org.redisson.config.*;
import org.redisson.misc.RedisURI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
*
* @author Nikita Koksharov
*
*/
public class ClusterConnectionManager extends MasterSlaveConnectionManager {
private final Logger log = LoggerFactory.getLogger(getClass());
private final Map<Integer, ClusterPartition> lastPartitions = new ConcurrentHashMap<>();
private final Map<RedisURI, ClusterPartition> lastUri2Partition = new ConcurrentHashMap<>();
private volatile Timeout monitorFuture;
private volatile RedisURI lastClusterNode;
private RedisStrictCommand<List<ClusterNodeInfo>> clusterNodesCommand;
private String configEndpointHostName;
private final AtomicReferenceArray<MasterSlaveEntry> slot2entry = new AtomicReferenceArray<>(MAX_SLOT);
private final Map<RedisClient, MasterSlaveEntry> client2entry = new ConcurrentHashMap<>();
private ClusterServersConfig cfg;
ClusterConnectionManager(ClusterServersConfig cfg, Config configCopy) {
super(cfg, configCopy);
this.serviceManager.setNatMapper(cfg.getNatMapper());
}
@Override
protected MasterSlaveServersConfig create(BaseMasterSlaveServersConfig<?> cfg) {
this.cfg = (ClusterServersConfig) cfg;
return super.create(cfg);
}
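// Establishes the initial cluster topology: iterates over the configured seed
// addresses, detects sharded Pub/Sub support, fetches CLUSTER NODES, parses
// partitions and creates a master entry per partition, then verifies slot
// coverage and schedules the periodic topology check.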
@Override
public void doConnect(Function<RedisURI, String> hostnameMapper) {
if (cfg.getScanInterval() <= 0) {
throw new IllegalArgumentException("scanInterval setting can't be 0 or less");
}
if (cfg.getNodeAddresses().isEmpty()) {
throw new IllegalArgumentException("At least one cluster node should be defined!");
}
Throwable lastException = null;
List<String> failedMasters = new ArrayList<>();
boolean skipShardingDetection = false;
for (String address : cfg.getNodeAddresses()) {
RedisURI addr = new RedisURI(address);
CompletionStage<RedisConnection> connectionFuture = connectToNode(cfg, addr, addr.getHost());
try {
RedisConnection connection = connectionFuture.toCompletableFuture()
.get(config.getConnectTimeout(), TimeUnit.MILLISECONDS);
if (cfg.getNodeAddresses().size() == 1 && !addr.isIP()) {
configEndpointHostName = addr.getHost();
}
clusterNodesCommand = new RedisStrictCommand<List<ClusterNodeInfo>>("CLUSTER", "NODES",
new ObjectDecoder(new ClusterNodesDecoder(addr.getScheme())));
if (!skipShardingDetection) {
if (cfg.getShardedSubscriptionMode() == ShardedSubscriptionMode.AUTO) {
try {
connection.sync(RedisCommands.PUBSUB_SHARDNUMSUB);
subscribeService.setShardingSupported(true);
} catch (Exception e) {
// skip
}
} else if (cfg.getShardedSubscriptionMode() == ShardedSubscriptionMode.ON) {
subscribeService.setShardingSupported(true);
}
skipShardingDetection = true;
}
List<ClusterNodeInfo> nodes = connection.sync(clusterNodesCommand);
StringBuilder nodesValue = new StringBuilder();
for (ClusterNodeInfo clusterNodeInfo : nodes) {
nodesValue.append(clusterNodeInfo.getNodeInfo()).append("\n");
}
log.info("Redis cluster nodes configuration got from {}:\n{}", connection.getRedisClient().getAddr(), nodesValue);
lastClusterNode = addr;
CompletableFuture<Collection<ClusterPartition>> partitionsFuture = parsePartitions(nodes);
Collection<ClusterPartition> partitions;
try {
partitions = partitionsFuture.join();
} catch (CompletionException e) {
lastException = e.getCause();
break;
}
List<CompletableFuture<Void>> masterFutures = new ArrayList<>();
for (ClusterPartition partition : partitions) {
if (partition.isMasterFail()) {
failedMasters.add(partition.getMasterAddress().toString());
continue;
}
if (partition.getMasterAddress() == null) {
throw new IllegalStateException("Master node: " + partition.getNodeId() + " doesn't have an address.");
}
CompletionStage<Void> masterFuture = addMasterEntry(partition, cfg);
masterFutures.add(masterFuture.toCompletableFuture());
}
CompletableFuture<Void> masterFuture = CompletableFuture.allOf(masterFutures.toArray(new CompletableFuture[0]));
try {
masterFuture.join();
} catch (CompletionException e) {
lastException = e.getCause();
}
break;
} catch (Exception e) {
if (e instanceof CompletionException) {
e = (Exception) e.getCause();
}
lastException = e;
log.warn(e.getMessage());
}
}
if (lastPartitions.isEmpty()) {
internalShutdown();
if (failedMasters.isEmpty()) {
throw new RedisConnectionException("Can't connect to servers!", lastException);
} else {
throw new RedisConnectionException("Can't connect to servers! Failed masters according to cluster status: " + failedMasters, lastException);
}
}
if (cfg.isCheckSlotsCoverage() && lastPartitions.size() != MAX_SLOT) {
internalShutdown();
if (failedMasters.isEmpty()) {
throw new RedisConnectionException("Not all slots covered! Only " + lastPartitions.size() + " slots are available. Set checkSlotsCoverage = false to avoid this check.", lastException);
} else {
throw new RedisConnectionException("Not all slots covered! Only " + lastPartitions.size() + " slots are available. Set checkSlotsCoverage = false to avoid this check. Failed masters according to cluster status: " + failedMasters, lastException);
}
}
scheduleClusterChangeCheck(cfg);
}
@Override
public Collection<MasterSlaveEntry> getEntrySet() {
lazyConnect();
return client2entry.values();
}
@Override
public MasterSlaveEntry getEntry(RedisURI addr) {
lazyConnect();
for (MasterSlaveEntry entry : client2entry.values()) {
if (addr.equals(entry.getClient().getAddr())) {
return entry;
}
if (entry.hasSlave(addr)) {
return entry;
}
}
return null;
}
@Override
public MasterSlaveEntry getEntry(RedisClient redisClient) {
lazyConnect();
MasterSlaveEntry entry = client2entry.get(redisClient);
if (entry != null) {
return entry;
}
for (MasterSlaveEntry mentry : client2entry.values()) {
if (mentry.hasSlave(redisClient)) {
return mentry;
}
}
return null;
}
@Override
public MasterSlaveEntry getEntry(InetSocketAddress address) {
lazyConnect();
for (MasterSlaveEntry entry : client2entry.values()) {
InetSocketAddress addr = entry.getClient().getAddr();
if (addr.getAddress().equals(address.getAddress()) && addr.getPort() == address.getPort()) {
return entry;
}
if (entry.hasSlave(address)) {
return entry;
}
}
return null;
}
@Override
protected CompletableFuture<RedisClient> changeMaster(int slot, RedisURI address) {
MasterSlaveEntry entry = getEntry(slot);
RedisClient oldClient = entry.getClient();
CompletableFuture<RedisClient> future = super.changeMaster(slot, address);
return future.thenApply(res -> {
client2entry.remove(oldClient);
client2entry.put(entry.getClient(), entry);
return res;
});
}
@Override
public MasterSlaveEntry getEntry(int slot) {
lazyConnect();
return slot2entry.get(slot);
}
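// Binds a slot to its MasterSlaveEntry. Entries are reference-counted by the
// number of slots pointing at them, so a replaced entry is shut down once
// the count reaches zero.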
private void addEntry(Integer slot, MasterSlaveEntry entry) {
MasterSlaveEntry oldEntry = slot2entry.getAndSet(slot, entry);
if (oldEntry != entry) {
entry.incReference();
shutdownEntry(oldEntry);
}
client2entry.put(entry.getClient(), entry);
}
private void removeEntry(Integer slot) {
MasterSlaveEntry entry = slot2entry.getAndSet(slot, null);
shutdownEntry(entry);
}
private void removeEntry(Integer slot, MasterSlaveEntry entry) {
if (slot2entry.compareAndSet(slot, entry, null)) {
shutdownEntry(entry);
}
}
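// Shuts down an entry once its reference count drops to zero: marks the
// master and all slaves down, closes their connections and removes the
// entry from the client mapping.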
private void shutdownEntry(MasterSlaveEntry entry) {
if (entry != null && entry.decReference() == 0) {
entry.getAllEntries().forEach(e -> {
RedisURI uri = new RedisURI(e.getClient().getConfig().getAddress().getScheme(),
e.getClient().getAddr().getAddress().getHostAddress(),
e.getClient().getAddr().getPort());
disconnectNode(uri);
e.nodeDown();
});
entry.masterDown();
entry.shutdownAsync();
subscribeService.remove(entry);
RedisURI uri = new RedisURI(entry.getClient().getConfig().getAddress().getScheme(),
entry.getClient().getAddr().getAddress().getHostAddress(),
entry.getClient().getAddr().getPort());
disconnectNode(uri);
client2entry.remove(entry.getClient());
String slaves = entry.getAllEntries().stream()
.filter(e -> !e.getClient().getAddr().equals(entry.getClient().getAddr()))
.map(e -> e.getClient().toString())
.collect(Collectors.joining(","));
log.info("{} master and related slaves: {} removed", entry.getClient().getAddr(), slaves);
}
}
@Override
protected RedisClientConfig createRedisConfig(NodeType type, RedisURI address, int timeout, int commandTimeout, String sslHostname) {
RedisClientConfig result = super.createRedisConfig(type, address, timeout, commandTimeout, sslHostname);
result.setReadOnly(type == NodeType.SLAVE && config.getReadMode() != ReadMode.MASTER);
return result;
}
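// Creates a MasterSlaveEntry (or SingleEntry when slaves aren't used) for the
// partition's master, registers its slots and initializes the slave balancer.
// Fails immediately if the master carries the FAIL flag.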
private CompletionStage<Void> addMasterEntry(ClusterPartition partition, ClusterServersConfig cfg) {
if (partition.isMasterFail()) {
RedisException e = new RedisException("Failed to add master: " +
partition.getMasterAddress() + " for slot ranges: " +
partition.getSlotRanges() + ". Reason - server has FAIL flag");
if (partition.getSlotsAmount() == 0) {
e = new RedisException("Failed to add master: " +
partition.getMasterAddress() + ". Reason - server has FAIL flag");
}
CompletableFuture<Void> result = new CompletableFuture<>();
result.completeExceptionally(e);
return result;
}
CompletionStage<RedisConnection> connectionFuture = connectToNode(cfg, partition.getMasterAddress(), configEndpointHostName);
return connectionFuture.thenCompose(connection -> {
MasterSlaveServersConfig config = create(cfg);
config.setMasterAddress(partition.getMasterAddress().toString());
MasterSlaveEntry entry;
if (config.isSlaveNotUsed()) {
entry = new SingleEntry(this, config);
} else {
Set<String> slaveAddresses = partition.getSlaveAddresses().stream()
.filter(r -> !partition.getFailedSlaveAddresses().contains(r))
.map(r -> r.toString())
.collect(Collectors.toSet());
config.setSlaveAddresses(slaveAddresses);
entry = new MasterSlaveEntry(ClusterConnectionManager.this, config);
}
CompletableFuture<RedisClient> f = entry.setupMasterEntry(new RedisURI(config.getMasterAddress()), configEndpointHostName);
return f.thenCompose(masterClient -> {
for (Integer slot : partition.getSlots()) {
addEntry(slot, entry);
addPartition(slot, partition);
}
if (partition.getSlotsAmount() > 0) {
lastUri2Partition.put(partition.getMasterAddress(), partition);
}
if (!config.isSlaveNotUsed()) {
CompletableFuture<Void> fs = entry.initSlaveBalancer(r -> configEndpointHostName);
return fs.thenAccept(r -> {
if (!partition.getSlaveAddresses().isEmpty()) {
log.info("slaves: {} added for master: {} slot ranges: {}",
partition.getSlaveAddresses(), partition.getMasterAddress(), partition.getSlotRanges());
if (!partition.getFailedSlaveAddresses().isEmpty()) {
log.warn("slaves: {} down for master: {} slot ranges: {}",
partition.getFailedSlaveAddresses(), partition.getMasterAddress(), partition.getSlotRanges());
}
}
log.info("master: {} added for slot ranges: {}", partition.getMasterAddress(), partition.getSlotRanges());
});
}
log.info("master: {} added for slot ranges: {}", partition.getMasterAddress(), partition.getSlotRanges());
return CompletableFuture.completedFuture(null);
});
});
}
private void addPartition(Integer slot, ClusterPartition partition) {
partition.incReference();
ClusterPartition prevPartition = lastPartitions.put(slot, partition);
if (prevPartition != null
&& prevPartition.decReference() == 0) {
lastUri2Partition.remove(prevPartition.getMasterAddress());
}
}
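// Schedules the next topology check after scanInterval. When a single DNS
// endpoint is configured, all its resolved addresses are checked; otherwise
// the known masters are tried first, then slaves, each group shuffled.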
private void scheduleClusterChangeCheck(ClusterServersConfig cfg) {
monitorFuture = serviceManager.newTimeout(t -> {
if (configEndpointHostName != null) {
String address = cfg.getNodeAddresses().iterator().next();
RedisURI uri = new RedisURI(address);
CompletableFuture<List<RedisURI>> allNodes = serviceManager.resolveAll(uri);
allNodes.whenComplete((nodes, ex) -> {
log.debug("{} resolved to {}", uri, nodes);
AtomicReference<Throwable> lastException = new AtomicReference<>(ex);
if (ex != null) {
checkClusterState(cfg, Collections.emptyIterator(), lastException, nodes);
return;
}
Iterator<RedisURI> nodesIterator = nodes.iterator();
checkClusterState(cfg, nodesIterator, lastException, nodes);
});
} else {
AtomicReference<Throwable> lastException = new AtomicReference<>();
List<RedisURI> nodes = new ArrayList<>();
List<RedisURI> slaves = new ArrayList<>();
for (ClusterPartition partition : getLastPartitions()) {
if (!partition.isMasterFail()) {
nodes.add(partition.getMasterAddress());
}
Set<RedisURI> partitionSlaves = new HashSet<>(partition.getSlaveAddresses());
partitionSlaves.removeAll(partition.getFailedSlaveAddresses());
slaves.addAll(partitionSlaves);
}
Collections.shuffle(nodes);
Collections.shuffle(slaves);
// master nodes first
nodes.addAll(slaves);
Iterator<RedisURI> nodesIterator = nodes.iterator();
checkClusterState(cfg, nodesIterator, lastException, nodes);
}
}, cfg.getScanInterval(), TimeUnit.MILLISECONDS);
}
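// Tries the next node from the iterator until a connection succeeds. If all
// nodes fail, the accumulated errors are logged and a new check is scheduled.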
private void checkClusterState(ClusterServersConfig cfg, Iterator<RedisURI> iterator, AtomicReference<Throwable> lastException, List<RedisURI> allNodes) {
if (!iterator.hasNext()) {
if (lastException.get() != null) {
log.error("Can't update cluster state using nodes: {}. A new attempt will be made.", allNodes, lastException.getAndSet(null));
}
scheduleClusterChangeCheck(cfg);
return;
}
if (serviceManager.isShuttingDown()) {
return;
}
RedisURI uri = iterator.next();
CompletionStage<RedisConnection> connectionFuture = connectToNode(cfg, uri, configEndpointHostName);
connectionFuture.whenComplete((connection, e) -> {
if (e != null) {
if (!lastException.compareAndSet(null, e)) {
lastException.get().addSuppressed(e);
}
checkClusterState(cfg, iterator, lastException, allNodes);
return;
}
updateClusterState(cfg, connection, iterator, uri, lastException, allNodes);
});
}
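// Fetches CLUSTER NODES from the connected node, parses the new partitions
// and applies master, slave, slot-migration and slot-coverage changes before
// scheduling the next check.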
private void updateClusterState(ClusterServersConfig cfg, RedisConnection connection,
Iterator<RedisURI> iterator, RedisURI uri, AtomicReference<Throwable> lastException, List<RedisURI> allNodes) {
RFuture<List<ClusterNodeInfo>> future = connection.async(StringCodec.INSTANCE, clusterNodesCommand);
future.whenComplete((nodes, e) -> {
if (e != null) {
if (!lastException.compareAndSet(null, e)) {
lastException.get().addSuppressed(e);
}
checkClusterState(cfg, iterator, lastException, allNodes);
return;
}
if (nodes.isEmpty()) {
log.debug("cluster nodes state got from {}: doesn't contain any nodes", connection.getRedisClient().getAddr());
checkClusterState(cfg, iterator, lastException, allNodes);
return;
}
lastClusterNode = uri;
if (log.isDebugEnabled()) {
StringBuilder nodesValue = new StringBuilder();
for (ClusterNodeInfo clusterNodeInfo : nodes) {
nodesValue.append(clusterNodeInfo.getNodeInfo()).append("\n");
}
log.debug("Cluster nodes state got from {}:\n{}", connection.getRedisClient().getAddr(), nodesValue);
serviceManager.setLastClusterNodes(nodesValue.toString());
}
CompletableFuture<Collection<ClusterPartition>> newPartitionsFuture = parsePartitions(nodes);
newPartitionsFuture
.whenComplete((r, ex) -> {
if (ex != null) {
StringBuilder nodesValue = new StringBuilder();
for (ClusterNodeInfo clusterNodeInfo : nodes) {
nodesValue.append(clusterNodeInfo.getNodeInfo()).append("\n");
}
log.error("Unable to parse cluster nodes state got from: {}:\n{}", connection.getRedisClient().getAddr(), nodesValue, ex);
if (!lastException.compareAndSet(null, ex)) {
lastException.get().addSuppressed(ex);
}
checkClusterState(cfg, iterator, lastException, allNodes);
}
})
.thenCompose(newPartitions -> checkMasterNodesChange(cfg, newPartitions))
.thenCompose(r -> newPartitionsFuture)
.thenCompose(newPartitions -> checkSlaveNodesChange(newPartitions))
.thenCompose(r -> newPartitionsFuture)
.whenComplete((newPartitions, ex) -> {
if (newPartitions != null
&& !newPartitions.isEmpty()) {
try {
checkSlotsMigration(newPartitions);
checkSlotsChange(newPartitions);
} catch (Exception exc) {
log.error(exc.getMessage(), exc);
}
}
if (ex != null) {
log.error(ex.getMessage(), ex);
}
scheduleClusterChangeCheck(cfg);
});
});
}
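// Compares each new partition against the last known state of the same
// master and applies slave additions, removals and up/down transitions.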
private CompletableFuture<Void> checkSlaveNodesChange(Collection<ClusterPartition> newPartitions) {
List<CompletableFuture<?>> futures = new ArrayList<>();
for (ClusterPartition newPart : newPartitions) {
ClusterPartition currentPart = lastUri2Partition.get(newPart.getMasterAddress());
if (currentPart == null) {
continue;
}
MasterSlaveEntry entry = getEntry(currentPart.getSlotRanges().iterator().next().getStartSlot());
// should be invoked first in order to remove stale failedSlaveAddresses
CompletableFuture<Set<RedisURI>> addedSlavesFuture = addRemoveSlaves(entry, currentPart, newPart);
CompletableFuture<Void> f = addedSlavesFuture.thenCompose(addedSlaves -> {
// Have some slaves changed state from failed to alive?
return upDownSlaves(entry, currentPart, newPart, addedSlaves);
});
futures.add(f);
}
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
.exceptionally(e -> {
if (e != null) {
log.error("Unable to add/remove slave nodes", e);
}
return null;
});
}
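// Brings slaves back up that are no longer marked as failed and marks
// newly failed slaves down.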
private CompletableFuture<Void> upDownSlaves(MasterSlaveEntry entry, ClusterPartition currentPart, ClusterPartition newPart, Set<RedisURI> addedSlaves) {
List<CompletableFuture<?>> futures = new ArrayList<>();
List<RedisURI> nonFailedSlaves = currentPart.getFailedSlaveAddresses().stream()
.filter(uri -> !addedSlaves.contains(uri) && !newPart.getFailedSlaveAddresses().contains(uri))
.collect(Collectors.toList());
nonFailedSlaves.forEach(uri -> {
if (entry.hasSlave(uri)) {
CompletableFuture<Boolean> f = entry.slaveUpNoMasterExclusionAsync(uri);
f = f.thenApply(v -> {
if (v) {
log.info("slave: {} is up for slot ranges: {}", uri, currentPart.getSlotRanges());
currentPart.removeFailedSlaveAddress(uri);
entry.excludeMasterFromSlaves(uri);
}
return v;
});
futures.add(f);
}
});
newPart.getFailedSlaveAddresses().stream()
.filter(uri -> !currentPart.getFailedSlaveAddresses().contains(uri))
.forEach(uri -> {
currentPart.addFailedSlaveAddress(uri);
boolean slaveDown = entry.slaveDown(uri);
if (config.isSlaveNotUsed() || slaveDown) {
disconnectNode(uri);
log.warn("slave: {} has down for slot ranges: {}", uri, currentPart.getSlotRanges());
}
});
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
}
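// Removes slaves that disappeared from the new partition state and adds
// (or unfreezes) newly discovered non-failed slaves. Returns the set of
// added slave URIs.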
private CompletableFuture<Set<RedisURI>> addRemoveSlaves(MasterSlaveEntry entry, ClusterPartition currentPart, ClusterPartition newPart) {
Set<RedisURI> removedSlaves = new HashSet<>(currentPart.getSlaveAddresses());
removedSlaves.removeAll(newPart.getSlaveAddresses());
if (!removedSlaves.isEmpty()) {
log.info("removed slaves detected for master {}. current slaves {} last slaves {}",
currentPart.getMasterAddress(), currentPart.getSlaveAddresses(), newPart.getSlaveAddresses());
}
for (RedisURI uri : removedSlaves) {
currentPart.removeSlaveAddress(uri);
boolean slaveDown = entry.slaveDown(uri);
if (config.isSlaveNotUsed() || slaveDown) {
disconnectNode(uri);
log.info("slave {} removed for master {} and slot ranges: {}",
currentPart.getMasterAddress(), uri, currentPart.getSlotRanges());
}
}
Set<RedisURI> addedSlaves = newPart.getSlaveAddresses().stream()
.filter(uri -> !currentPart.getSlaveAddresses().contains(uri)
&& !newPart.getFailedSlaveAddresses().contains(uri))
.collect(Collectors.toSet());
if (!addedSlaves.isEmpty()) {
log.info("added slaves detected for master {}. current slaves {} last slaves {} last failed slaves {}",
currentPart.getMasterAddress(), currentPart.getSlaveAddresses(),
newPart.getSlaveAddresses(), newPart.getFailedSlaveAddresses());
}
List<CompletableFuture<?>> futures = new ArrayList<>();
for (RedisURI uri : addedSlaves) {
ClientConnectionsEntry slaveEntry = entry.getEntry(uri);
if (slaveEntry != null) {
CompletableFuture<Boolean> slaveUpFuture = entry.slaveUpNoMasterExclusionAsync(uri);
slaveUpFuture = slaveUpFuture.thenApply(v -> {
if (v) {
currentPart.addSlaveAddress(uri);
log.info("slave: {} unfreezed for master {} and slot ranges: {}",
currentPart.getMasterAddress(), uri, currentPart.getSlotRanges());
entry.excludeMasterFromSlaves(uri);
}
return v;
});
futures.add(slaveUpFuture);
continue;
}
CompletableFuture<Void> slaveUpFuture = entry.addSlave(uri, configEndpointHostName);
CompletableFuture<Void> f = slaveUpFuture.thenAccept(res -> {
currentPart.addSlaveAddress(uri);
log.info("slave: {} added for master {} and slot ranges: {}",
currentPart.getMasterAddress(), uri, currentPart.getSlotRanges());
entry.excludeMasterFromSlaves(uri);
});
futures.add(f);
}
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
return f.thenApply(r -> addedSlaves);
}
private ClusterPartition find(Collection<ClusterPartition> partitions, Integer slot) {
return partitions.stream().filter(p -> p.hasSlot(slot)).findFirst().orElseThrow(() -> {
return new IllegalStateException("Unable to find partition with slot " + slot);
});
}
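// Switches slots of failed masters to their newly elected masters and adds
// entries for brand-new masters that aren't failover replacements.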
private CompletableFuture<Void> checkMasterNodesChange(ClusterServersConfig cfg, Collection<ClusterPartition> newPartitions) {
Map<RedisURI, ClusterPartition> addedPartitions = new HashMap<>();
Set<RedisURI> mastersElected = new HashSet<>();
List<CompletableFuture<?>> futures = new ArrayList<>();
for (ClusterPartition newPart : newPartitions) {
if (newPart.getSlotsAmount() == 0) {
continue;
}
ClusterPartition currentPart = lastUri2Partition.get(newPart.getMasterAddress());
boolean masterFound = currentPart != null;
if (masterFound && newPart.isMasterFail()) {
for (Integer slot : currentPart.getSlots()) {
ClusterPartition newMasterPart = find(newPartitions, slot);
// does partition have a new master?
if (!Objects.equals(newMasterPart.getMasterAddress(), currentPart.getMasterAddress())) {
RedisURI newUri = newMasterPart.getMasterAddress();
RedisURI oldUri = currentPart.getMasterAddress();
mastersElected.add(newUri);
CompletableFuture<RedisClient> future = changeMaster(slot, newUri);
currentPart.setMasterAddress(newUri);
CompletableFuture<RedisClient> f = future.whenComplete((res, e) -> {
if (e != null) {
currentPart.setMasterAddress(oldUri);
} else {
disconnectNode(oldUri);
}
});
futures.add(f);
}
}
}
if (!masterFound && !newPart.isMasterFail()) {
addedPartitions.put(newPart.getMasterAddress(), newPart);
}
}
addedPartitions.keySet().removeAll(mastersElected);
for (ClusterPartition newPart : addedPartitions.values()) {
CompletionStage<Void> future = addMasterEntry(newPart, cfg);
futures.add(future.toCompletableFuture());
}
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
.exceptionally(e -> {
if (e != null) {
log.error("Unable to add/change master node", e);
}
return null;
});
}
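// Reconciles slot ownership: drops slots no longer served by any partition
// and registers slots newly assigned to already known masters.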
private void checkSlotsChange(Collection<ClusterPartition> newPartitions) {
int newSlotsAmount = newPartitions.stream()
.mapToInt(ClusterPartition::getSlotsAmount)
.sum();
if (newSlotsAmount == lastPartitions.size() && lastPartitions.size() == MAX_SLOT) {
return;
}
Set<Integer> removedSlots = lastPartitions.keySet().stream()
.filter(s -> newPartitions.stream().noneMatch(p -> p.hasSlot(s)))
.collect(Collectors.toSet());
for (Integer slot : removedSlots) {
ClusterPartition p = lastPartitions.remove(slot);
if (p != null
&& p.decReference() == 0
&& lastUri2Partition.size() > 1) {
lastUri2Partition.remove(p.getMasterAddress());
}
removeEntry(slot);
}
if (!removedSlots.isEmpty()) {
log.info("{} slots removed", removedSlots.size());
}
Integer addedSlots = 0;
for (ClusterPartition clusterPartition : newPartitions) {
MasterSlaveEntry entry = getEntry(clusterPartition.getMasterAddress());
for (Integer slot : clusterPartition.getSlots()) {
if (lastPartitions.containsKey(slot)) {
continue;
}
if (entry != null) {
addEntry(slot, entry);
addPartition(slot, clusterPartition);
lastUri2Partition.put(clusterPartition.getMasterAddress(), clusterPartition);
addedSlots++;
}
}
}
if (addedSlots > 0) {
log.info("{} slots added", addedSlots);
}
}
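// Detects slots migrated between known nodes by diffing each node's old and
// new slot bitsets, then re-attaches Pub/Sub listeners for changed slots.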
private void checkSlotsMigration(Collection<ClusterPartition> newPartitions) {
Collection<ClusterPartition> clusterLastPartitions = getLastPartitions();
// https://github.com/redisson/redisson/issues/3635
Map<String, MasterSlaveEntry> nodeEntries = clusterLastPartitions.stream()
.collect(Collectors.toMap(p -> p.getNodeId(),
p -> getEntry(p.getSlotRanges().iterator().next().getStartSlot())));
Set<Integer> changedSlots = new HashSet<>();
for (ClusterPartition currentPartition : clusterLastPartitions) {
String nodeId = currentPartition.getNodeId();
for (ClusterPartition newPartition : newPartitions) {
if (!Objects.equals(nodeId, newPartition.getNodeId())
|| newPartition.getSlotRanges().equals(currentPartition.getSlotRanges())) {
continue;
}
MasterSlaveEntry entry = nodeEntries.get(nodeId);
BitSet addedSlots = newPartition.copySlots();
addedSlots.andNot(currentPartition.slots());
addedSlots.stream().forEach(slot -> {
addEntry(slot, entry);
addPartition(slot, currentPartition);
changedSlots.add(slot);
});
if (!addedSlots.isEmpty()) {
lastUri2Partition.put(currentPartition.getMasterAddress(), currentPartition);
log.info("{} slots added to {}", addedSlots.cardinality(), currentPartition.getMasterAddress());
}
BitSet removedSlots = currentPartition.copySlots();
removedSlots.andNot(newPartition.slots());
removedSlots.stream().forEach(slot -> {
if (lastPartitions.remove(slot, currentPartition)) {
if (currentPartition.decReference() == 0
&& lastUri2Partition.size() > 1) {
lastUri2Partition.remove(currentPartition.getMasterAddress());
}
removeEntry(slot, entry);
changedSlots.add(slot);
}
});
if (!removedSlots.isEmpty()) {
log.info("{} slots removed from {}", removedSlots.cardinality(), currentPartition.getMasterAddress());
}
if (!addedSlots.isEmpty() || !removedSlots.isEmpty()) {
// https://github.com/redisson/redisson/issues/3695: slotRanges were not updated when the node's slots changed.
currentPartition.updateSlotRanges(newPartition.getSlotRanges(), newPartition.slots());
}
break;
}
}
changedSlots.forEach(subscribeService::reattachPubSub);
}
private int indexOf(byte[] array, byte element) {
for (int i = 0; i < array.length; ++i) {
if (array[i] == element) {
return i;
}
}
return -1;
}
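// The calcSlot overloads below implement the Redis Cluster key hash slot:
// if the key contains a non-empty {...} hash tag, only the tag content is
// hashed; the slot is CRC16(key) modulo MAX_SLOT (16384).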
@Override
public int calcSlot(byte[] key) {
if (key == null) {
return 0;
}
int start = indexOf(key, (byte) '{');
if (start != -1) {
int end = indexOf(key, (byte) '}');
if (end != -1 && start + 1 < end) {
key = Arrays.copyOfRange(key, start + 1, end);
}
}
int result = CRC16.crc16(key) % MAX_SLOT;
return result;
}
@Override
public int calcSlot(ByteBuf key) {
if (key == null) {
return 0;
}
int start = key.indexOf(key.readerIndex(), key.readerIndex() + key.readableBytes(), (byte) '{');
if (start != -1) {
int end = key.indexOf(start + 1, key.readerIndex() + key.readableBytes(), (byte) '}');
if (end != -1 && start + 1 < end) {
key = key.slice(start + 1, end - start - 1);
}
}
int result = CRC16.crc16(key) % MAX_SLOT;
log.debug("slot {} for {}", result, key);
return result;
}
@Override
public int calcSlot(String key) {
if (key == null) {
return 0;
}
int start = key.indexOf('{');
if (start != -1) {
int end = key.indexOf('}');
if (end != -1 && start + 1 < end) {
key = key.substring(start + 1, end);
}
}
int result = CRC16.crc16(key.getBytes()) % MAX_SLOT;
log.debug("slot {} for {}", result, key);
return result;
}
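// Builds ClusterPartition objects from the CLUSTER NODES output: skips nodes
// without an address, in handshake state or masters without slots, resolves
// each address via DNS and links slave partitions to their masters.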
private CompletableFuture<Collection<ClusterPartition>> parsePartitions(List<ClusterNodeInfo> nodes) {
Map<String, ClusterPartition> partitions = new ConcurrentHashMap<>();
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (ClusterNodeInfo clusterNodeInfo : nodes) {
if (clusterNodeInfo.containsFlag(Flag.NOADDR)
|| clusterNodeInfo.containsFlag(Flag.HANDSHAKE)
|| clusterNodeInfo.getAddress() == null
|| (clusterNodeInfo.getSlotRanges().isEmpty() && clusterNodeInfo.containsFlag(Flag.MASTER))) {
// skip it
continue;
}
String masterId;
if (clusterNodeInfo.containsFlag(Flag.SLAVE)) {
masterId = clusterNodeInfo.getSlaveOf();
} else {
masterId = clusterNodeInfo.getNodeId();
}
if (masterId == null) {
// skip it
continue;
}
CompletableFuture<List<RedisURI>> ipsFuture = serviceManager.resolveAll(clusterNodeInfo.getAddress());
CompletableFuture<Void> f = ipsFuture.thenAccept(addresses -> {
int index = 0;
if (addresses.size() > 1) {
addresses.sort(Comparator.comparing(RedisURI::getHost));
}
RedisURI address = addresses.get(index);
if (addresses.size() > 1) {
for (RedisURI addr : addresses) {
for (ClusterPartition value : lastUri2Partition.values()) {
if (value.getNodeId().equals(clusterNodeInfo.getNodeId())
&& value.getMasterAddress().equals(addr)) {
address = addr;
break;
}
}
}
}
if (addresses.size() == 1) {
if (!clusterNodeInfo.getAddress().equals(address)) {
log.debug("{} resolved to {}", clusterNodeInfo.getAddress(), address);
}
} else {
log.debug("{} resolved to {} and {} selected", clusterNodeInfo.getAddress(), addresses, address);
}
if (clusterNodeInfo.containsFlag(Flag.SLAVE)) {
ClusterPartition masterPartition = partitions.computeIfAbsent(masterId, k -> new ClusterPartition(masterId));
ClusterPartition slavePartition = partitions.computeIfAbsent(clusterNodeInfo.getNodeId(),
k -> new ClusterPartition(clusterNodeInfo.getNodeId()));
slavePartition.setType(Type.SLAVE);
slavePartition.setParent(masterPartition);
masterPartition.addSlaveAddress(address);
if (clusterNodeInfo.containsFlag(Flag.FAIL)) {
masterPartition.addFailedSlaveAddress(address);
}
} else if (clusterNodeInfo.containsFlag(Flag.MASTER)) {
ClusterPartition masterPartition = partitions.computeIfAbsent(masterId, k -> new ClusterPartition(masterId));
masterPartition.setSlotRanges(clusterNodeInfo.getSlotRanges());
masterPartition.setMasterAddress(address);
masterPartition.setType(Type.MASTER);
if (clusterNodeInfo.containsFlag(Flag.FAIL)) {
masterPartition.setMasterFail(true);
}
}
}).exceptionally(ex -> {
if (clusterNodeInfo.containsFlag(Flag.FAIL)
|| clusterNodeInfo.containsFlag(Flag.EVENTUAL_FAIL)) {
return null;
}
log.error(ex.getMessage(), ex);
return null;
});
futures.add(f);
}
CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
return future.thenApply(r -> {
addCascadeSlaves(partitions.values());
List<ClusterPartition> ps = partitions.values()
.stream()
.filter(cp -> cp.getType() == Type.MASTER
&& cp.getMasterAddress() != null
&& ((!cp.slots().isEmpty() && partitions.size() == 1) || partitions.size() > 1))
.collect(Collectors.toList());
return ps;
});
}
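// Re-attaches slaves of slaves (cascaded replication) directly to the root
// master partition and removes the intermediate slave partitions.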
private void addCascadeSlaves(Collection<ClusterPartition> partitions) {
Iterator<ClusterPartition> iter = partitions.iterator();
while (iter.hasNext()) {
ClusterPartition cp = iter.next();
if (cp.getType() != Type.SLAVE) {
continue;
}
if (cp.getParent() != null && cp.getParent().getType() == Type.MASTER) {
ClusterPartition parent = cp.getParent();
for (RedisURI addr : cp.getSlaveAddresses()) {
parent.addSlaveAddress(addr);
}
for (RedisURI addr : cp.getFailedSlaveAddresses()) {
parent.addFailedSlaveAddress(addr);
}
}
iter.remove();
}
}
@Override
public void shutdown(long quietPeriod, long timeout, TimeUnit unit) {
if (monitorFuture != null) {
monitorFuture.cancel();
}
closeNodeConnections();
super.shutdown(quietPeriod, timeout, unit);
}
private Collection<ClusterPartition> getLastPartitions() {
return lastUri2Partition.values().stream().collect(Collectors.toMap(e -> e.getNodeId(), Function.identity(),
BinaryOperator.maxBy(Comparator.comparing(e -> e.getTime())))).values();
}
public int getSlot(MasterSlaveEntry entry) {
return lastPartitions.entrySet().stream()
.filter(e -> e.getValue().getMasterAddress().equals(entry.getClient().getConfig().getAddress()))
.findAny()
.map(m -> m.getKey())
.orElse(-1);
}
@Override
public RedisURI getLastClusterNode() {
return lastClusterNode;
}
}