// Source: com.lambdaworks.redis.cluster.RedisClusterClient (lettuce)
/*
* Copyright 2011-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.lambdaworks.redis.cluster;
import java.io.Closeable;
import java.net.SocketAddress;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import com.lambdaworks.redis.*;
import com.lambdaworks.redis.api.StatefulRedisConnection;
import com.lambdaworks.redis.cluster.api.NodeSelectionSupport;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
import com.lambdaworks.redis.cluster.api.async.RedisAdvancedClusterAsyncCommands;
import com.lambdaworks.redis.cluster.api.sync.RedisAdvancedClusterCommands;
import com.lambdaworks.redis.cluster.event.ClusterTopologyChangedEvent;
import com.lambdaworks.redis.cluster.models.partitions.Partitions;
import com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode;
import com.lambdaworks.redis.cluster.topology.ClusterTopologyRefresh;
import com.lambdaworks.redis.cluster.topology.NodeConnectionFactory;
import com.lambdaworks.redis.cluster.topology.TopologyComparators;
import com.lambdaworks.redis.codec.RedisCodec;
import com.lambdaworks.redis.codec.StringCodec;
import com.lambdaworks.redis.internal.LettuceAssert;
import com.lambdaworks.redis.internal.LettuceLists;
import com.lambdaworks.redis.output.KeyValueStreamingChannel;
import com.lambdaworks.redis.output.ValueStreamingChannel;
import com.lambdaworks.redis.protocol.DefaultEndpoint;
import com.lambdaworks.redis.protocol.CommandHandler;
import com.lambdaworks.redis.pubsub.PubSubCommandHandler;
import com.lambdaworks.redis.pubsub.PubSubEndpoint;
import com.lambdaworks.redis.pubsub.StatefulRedisPubSubConnection;
import com.lambdaworks.redis.pubsub.StatefulRedisPubSubConnectionImpl;
import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.SocketAddressResolver;
import io.netty.util.concurrent.ScheduledFuture;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
/**
* A scalable thread-safe Redis cluster client. Multiple threads may share one connection. The
* cluster client handles command routing based on the first key of the command and maintains a view of the cluster that is
* available when calling the {@link #getPartitions()} method.
*
*
* Connections to the cluster members are opened on the first access to the cluster node and managed by the
* {@link StatefulRedisClusterConnection}. You should not use transactional commands on cluster connections since {@code
* MULTI}, {@code EXEC} and {@code DISCARD} have no key and cannot be assigned to a particular node.
*
*
* The Redis cluster client provides a {@link RedisAdvancedClusterCommands sync}, {@link RedisAdvancedClusterAsyncCommands
* async} and {@link com.lambdaworks.redis.cluster.api.reactive.RedisAdvancedClusterReactiveCommands reactive} API.
*
*
*
* Connections to particular nodes can be obtained by {@link StatefulRedisClusterConnection#getConnection(String)} providing the
* node id or {@link StatefulRedisClusterConnection#getConnection(String, int)} by host and port.
*
*
*
* Multiple keys operations have to operate on a key
* that hashes to the same slot. Following commands do not need to follow that rule since they are pipelined according to its
* hash value to multiple nodes in parallel on the sync, async and, reactive API:
*
*
* - {@link RedisAdvancedClusterAsyncCommands#del(Object[]) DEL}
* - {@link RedisAdvancedClusterAsyncCommands#unlink(Object[]) UNLINK}
* - {@link RedisAdvancedClusterAsyncCommands#mget(Object[]) MGET}
 * - {@link RedisAdvancedClusterAsyncCommands#mget(KeyValueStreamingChannel, Object[]) MGET with streaming}
* - {@link RedisAdvancedClusterAsyncCommands#mset(Map) MSET}
* - {@link RedisAdvancedClusterAsyncCommands#msetnx(Map) MSETNX}
*
*
*
* Following commands on the Cluster sync, async and, reactive API are implemented with a Cluster-flavor:
*
*
 * - {@link RedisAdvancedClusterAsyncCommands#clientSetname(Object)} Executes {@code CLIENT SETNAME} on all connections and
* initializes new connections with the {@code clientName}.
* - {@link RedisAdvancedClusterAsyncCommands#flushall()} Run {@code FLUSHALL} on all master nodes.
* - {@link RedisAdvancedClusterAsyncCommands#flushdb()} Executes {@code FLUSHDB} on all master nodes.
* - {@link RedisAdvancedClusterAsyncCommands#keys(Object)} Executes {@code
* KEYS} on all.
* - {@link RedisAdvancedClusterAsyncCommands#randomkey()} Returns a random key from a random master node.
* - {@link RedisAdvancedClusterAsyncCommands#scriptFlush()} Executes {@code SCRIPT FLUSH} on all nodes.
* - {@link RedisAdvancedClusterAsyncCommands#scriptKill()} Executes {@code SCRIPT KILL} on all nodes.
* - {@link RedisAdvancedClusterAsyncCommands#shutdown(boolean)} Executes {@code SHUTDOWN} on all nodes.
* - {@link RedisAdvancedClusterAsyncCommands#scan()} Executes a {@code SCAN} on all nodes according to {@link ReadFrom}. The
* resulting cursor must be reused across the {@code SCAN} to scan iteratively across the whole cluster.
*
*
*
* Cluster commands can be issued to multiple hosts in parallel by using the {@link NodeSelectionSupport} API. A set of nodes is
* selected using a {@link java.util.function.Predicate} and commands can be issued to the node selection
*
*
* AsyncExecutions ping = commands.masters().commands().ping();
* Collection nodes = ping.nodes();
* nodes.stream().forEach(redisClusterNode -> ping.get(redisClusterNode));
*
*
*
* {@link RedisClusterClient} is an expensive resource. Reuse this instance or the {@link ClientResources} as much as possible.
*
* @author Mark Paluch
* @since 3.0
*/
public class RedisClusterClient extends AbstractRedisClient {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(RedisClusterClient.class);
protected final AtomicBoolean clusterTopologyRefreshActivated = new AtomicBoolean(false);
protected final AtomicReference> clusterTopologyRefreshFuture = new AtomicReference<>();
private final ClusterTopologyRefresh refresh = new ClusterTopologyRefresh(new NodeConnectionFactoryImpl(), getResources());
private final ClusterTopologyRefreshScheduler clusterTopologyRefreshScheduler = new ClusterTopologyRefreshScheduler(this,
getResources());
private final Iterable initialUris;
private Partitions partitions;
private RedisClusterClient() {
super(null);
initialUris = Collections.emptyList();
}
/**
* Initialize the client with a list of cluster URI's. All uris are tried in sequence for connecting initially to the
* cluster. If any uri is successful for connection, the others are not tried anymore. The initial uri is needed to discover
* the cluster structure for distributing the requests.
*
* @param clientResources the client resources. If {@literal null}, the client will create a new dedicated instance of
* client resources and keep track of them.
* @param redisURIs iterable of initial {@link RedisURI cluster URIs}. Must not be {@literal null} and not empty.
*/
protected RedisClusterClient(ClientResources clientResources, Iterable redisURIs) {
super(clientResources);
assertNotEmpty(redisURIs);
assertSameOptions(redisURIs);
this.initialUris = Collections.unmodifiableList(LettuceLists.newList(redisURIs));
setDefaultTimeout(getFirstUri().getTimeout(), getFirstUri().getUnit());
setOptions(ClusterClientOptions.builder().build());
}
private static void assertSameOptions(Iterable redisURIs) {
Boolean ssl = null;
Boolean startTls = null;
Boolean verifyPeer = null;
for (RedisURI redisURI : redisURIs) {
if (ssl == null) {
ssl = redisURI.isSsl();
}
if (startTls == null) {
startTls = redisURI.isStartTls();
}
if (verifyPeer == null) {
verifyPeer = redisURI.isVerifyPeer();
}
if (ssl.booleanValue() != redisURI.isSsl()) {
throw new IllegalArgumentException(
"RedisURI " + redisURI + " SSL is not consistent with the other seed URI SSL settings");
}
if (startTls.booleanValue() != redisURI.isStartTls()) {
throw new IllegalArgumentException(
"RedisURI " + redisURI + " StartTLS is not consistent with the other seed URI StartTLS settings");
}
if (verifyPeer.booleanValue() != redisURI.isVerifyPeer()) {
throw new IllegalArgumentException(
"RedisURI " + redisURI + " VerifyPeer is not consistent with the other seed URI VerifyPeer settings");
}
}
}
/**
* Create a new client that connects to the supplied {@link RedisURI uri} with default {@link ClientResources}. You can
* connect to different Redis servers but you must supply a {@link RedisURI} on connecting.
*
* @param redisURI the Redis URI, must not be {@literal null}
* @return a new instance of {@link RedisClusterClient}
*/
public static RedisClusterClient create(RedisURI redisURI) {
assertNotNull(redisURI);
return create(Collections.singleton(redisURI));
}
/**
* Create a new client that connects to the supplied {@link RedisURI uri} with default {@link ClientResources}. You can
* connect to different Redis servers but you must supply a {@link RedisURI} on connecting.
*
* @param redisURIs one or more Redis URI, must not be {@literal null} and not empty
* @return a new instance of {@link RedisClusterClient}
*/
public static RedisClusterClient create(Iterable redisURIs) {
assertNotEmpty(redisURIs);
assertSameOptions(redisURIs);
return new RedisClusterClient(null, redisURIs);
}
/**
* Create a new client that connects to the supplied uri with default {@link ClientResources}. You can connect to different
* Redis servers but you must supply a {@link RedisURI} on connecting.
*
* @param uri the Redis URI, must not be {@literal null}
* @return a new instance of {@link RedisClusterClient}
*/
public static RedisClusterClient create(String uri) {
LettuceAssert.notNull(uri, "URI must not be null");
return create(RedisURI.create(uri));
}
/**
* Create a new client that connects to the supplied {@link RedisURI uri} with shared {@link ClientResources}. You need to
* shut down the {@link ClientResources} upon shutting down your application.You can connect to different Redis servers but
* you must supply a {@link RedisURI} on connecting.
*
* @param clientResources the client resources, must not be {@literal null}
* @param redisURI the Redis URI, must not be {@literal null}
* @return a new instance of {@link RedisClusterClient}
*/
public static RedisClusterClient create(ClientResources clientResources, RedisURI redisURI) {
assertNotNull(clientResources);
assertNotNull(redisURI);
return create(clientResources, Collections.singleton(redisURI));
}
/**
* Create a new client that connects to the supplied uri with shared {@link ClientResources}.You need to shut down the
* {@link ClientResources} upon shutting down your application. You can connect to different Redis servers but you must
* supply a {@link RedisURI} on connecting.
*
* @param clientResources the client resources, must not be {@literal null}
* @param uri the Redis URI, must not be {@literal null}
* @return a new instance of {@link RedisClusterClient}
*/
public static RedisClusterClient create(ClientResources clientResources, String uri) {
assertNotNull(clientResources);
LettuceAssert.notNull(uri, "URI must not be null");
return create(clientResources, RedisURI.create(uri));
}
/**
* Create a new client that connects to the supplied {@link RedisURI uri} with shared {@link ClientResources}. You need to
* shut down the {@link ClientResources} upon shutting down your application.You can connect to different Redis servers but
* you must supply a {@link RedisURI} on connecting.
*
* @param clientResources the client resources, must not be {@literal null}
* @param redisURIs one or more Redis URI, must not be {@literal null} and not empty
* @return a new instance of {@link RedisClusterClient}
*/
public static RedisClusterClient create(ClientResources clientResources, Iterable redisURIs) {
assertNotNull(clientResources);
assertNotEmpty(redisURIs);
assertSameOptions(redisURIs);
return new RedisClusterClient(clientResources, redisURIs);
}
/**
* Connect to a Redis Cluster and treat keys and values as UTF-8 strings.
*
* What to expect from this connection:
*
*
* - A default connection is created to the node with the lowest latency
* - Keyless commands are send to the default connection
* - Single-key keyspace commands are routed to the appropriate node
* - Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node
* - Pub/sub commands are sent to the node that handles the slot derived from the pub/sub channel
*
*
* @return A new stateful Redis Cluster connection
*/
public StatefulRedisClusterConnection connect() {
return connect(newStringStringCodec());
}
/**
* Connect to a Redis Cluster. Use the supplied {@link RedisCodec codec} to encode/decode keys and values.
*
* What to expect from this connection:
*
*
* - A default connection is created to the node with the lowest latency
* - Keyless commands are send to the default connection
* - Single-key keyspace commands are routed to the appropriate node
* - Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node
* - Pub/sub commands are sent to the node that handles the slot derived from the pub/sub channel
*
*
* @param codec Use this codec to encode/decode keys and values, must not be {@literal null}
* @param Key type
* @param Value type
* @return A new stateful Redis Cluster connection
*/
@SuppressWarnings("unchecked")
public StatefulRedisClusterConnection connect(RedisCodec codec) {
return connectClusterImpl(codec);
}
/**
* Connect to a Redis Cluster using pub/sub connections and treat keys and values as UTF-8 strings.
*
* What to expect from this connection:
*
*
* - A default connection is created to the node with the least number of clients
* - Pub/sub commands are sent to the node with the least number of clients
* - Keyless commands are send to the default connection
* - Single-key keyspace commands are routed to the appropriate node
* - Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node
*
*
* @return A new stateful Redis Cluster connection
*/
public StatefulRedisPubSubConnection connectPubSub() {
return connectPubSub(newStringStringCodec());
}
/**
* Connect to a Redis Cluster using pub/sub connections. Use the supplied {@link RedisCodec codec} to encode/decode keys and
* values.
*
* What to expect from this connection:
*
*
* - A default connection is created to the node with the least number of clients
* - Pub/sub commands are sent to the node with the least number of clients
* - Keyless commands are send to the default connection
* - Single-key keyspace commands are routed to the appropriate node
* - Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node
*
*
* @param codec Use this codec to encode/decode keys and values, must not be {@literal null}
* @param Key type
* @param Value type
* @return A new stateful Redis Cluster connection
*/
@SuppressWarnings("unchecked")
public StatefulRedisPubSubConnection connectPubSub(RedisCodec codec) {
return connectClusterPubSubImpl(codec);
}
protected StatefulRedisConnection connectToNode(final SocketAddress socketAddress) {
return connectToNode(newStringStringCodec(), socketAddress.toString(), null, new Supplier() {
@Override
public SocketAddress get() {
return socketAddress;
}
});
}
/**
* Create a connection to a redis socket address.
*
* @param codec Use this codec to encode/decode keys and values, must not be {@literal null}
* @param nodeId the nodeId
* @param clusterWriter global cluster writer
* @param socketAddressSupplier supplier for the socket address
* @param Key type
* @param Value type
* @return A new connection
*/
StatefulRedisConnection connectToNode(RedisCodec codec, String nodeId, RedisChannelWriter clusterWriter,
final Supplier socketAddressSupplier) {
assertNotNull(codec);
assertNotEmpty(initialUris);
LettuceAssert.notNull(socketAddressSupplier, "SocketAddressSupplier must not be null");
logger.debug("connectNode(" + nodeId + ")");
ClusterNodeEndpoint endpoint = new ClusterNodeEndpoint(clientOptions, getResources(), clusterWriter);
StatefulRedisConnectionImpl connection = new StatefulRedisConnectionImpl(endpoint, codec, timeout, unit);
try {
connectStateful(connection, endpoint, getFirstUri(), socketAddressSupplier,
() -> new CommandHandler(clientResources, endpoint));
connection.registerCloseables(closeableResources, connection);
} catch (RedisException e) {
connection.close();
throw e;
}
return connection;
}
/**
* Create a clustered pub/sub connection with command distributor.
*
* @param codec Use this codec to encode/decode keys and values, must not be {@literal null}
* @param Key type
* @param Value type
* @return a new connection
*/
StatefulRedisClusterConnectionImpl connectClusterImpl(RedisCodec codec) {
if (partitions == null) {
initializePartitions();
}
activateTopologyRefreshIfNeeded();
logger.debug("connectCluster(" + initialUris + ")");
Supplier socketAddressSupplier = getSocketAddressSupplier(TopologyComparators::sortByClientCount);
DefaultEndpoint endpoint = new DefaultEndpoint(clientOptions);
ClusterDistributionChannelWriter clusterWriter = new ClusterDistributionChannelWriter(clientOptions,
endpoint, clusterTopologyRefreshScheduler, getResources().eventExecutorGroup());
PooledClusterConnectionProvider pooledClusterConnectionProvider = new PooledClusterConnectionProvider(this,
clusterWriter, codec);
clusterWriter.setClusterConnectionProvider(pooledClusterConnectionProvider);
StatefulRedisClusterConnectionImpl connection = new StatefulRedisClusterConnectionImpl<>(clusterWriter, codec,
timeout, unit);
connection.setReadFrom(ReadFrom.MASTER);
connection.setPartitions(partitions);
boolean connected = false;
RedisException causingException = null;
int connectionAttempts = Math.max(1, partitions.size());
for (int i = 0; i < connectionAttempts; i++) {
try {
connectStateful(endpoint, connection, getFirstUri(), socketAddressSupplier,
() -> new CommandHandler(clientResources, endpoint));
connected = true;
break;
} catch (RedisException e) {
logger.warn(e.getMessage());
causingException = e;
}
}
if (!connected) {
connection.close();
if (causingException != null) {
throw causingException;
}
}
connection.registerCloseables(closeableResources, connection, clusterWriter, pooledClusterConnectionProvider);
return connection;
}
/**
* Create a clustered connection with command distributor.
*
* @param codec Use this codec to encode/decode keys and values, must not be {@literal null}
* @param Key type
* @param Value type
* @return a new connection
*/
StatefulRedisPubSubConnectionImpl connectClusterPubSubImpl(RedisCodec codec) {
if (partitions == null) {
initializePartitions();
}
activateTopologyRefreshIfNeeded();
logger.debug("connectClusterPubSub(" + initialUris + ")");
Supplier socketAddressSupplier = getSocketAddressSupplier(TopologyComparators::sortByClientCount);
PubSubEndpoint endpoint = new PubSubEndpoint(clientOptions);
ClusterDistributionChannelWriter clusterWriter = new ClusterDistributionChannelWriter(clientOptions, endpoint,
clusterTopologyRefreshScheduler, getResources().eventExecutorGroup());
PooledClusterConnectionProvider pooledClusterConnectionProvider = new PooledClusterConnectionProvider(this,
clusterWriter, codec);
clusterWriter.setClusterConnectionProvider(pooledClusterConnectionProvider);
StatefulRedisPubSubConnectionImpl connection = new StatefulRedisPubSubConnectionImpl<>(endpoint, clusterWriter,
codec, timeout, unit);
clusterWriter.setPartitions(partitions);
boolean connected = false;
RedisException causingException = null;
int connectionAttempts = Math.max(1, partitions.size());
for (int i = 0; i < connectionAttempts; i++) {
try {
connectStateful(connection, endpoint, getFirstUri(), socketAddressSupplier,
() -> new PubSubCommandHandler(clientResources, codec, endpoint));
connected = true;
break;
} catch (RedisException e) {
logger.warn(e.getMessage());
causingException = e;
}
}
if (!connected) {
connection.close();
throw causingException;
}
connection.registerCloseables(closeableResources, connection, clusterWriter, pooledClusterConnectionProvider);
if (getFirstUri().getPassword() != null) {
connection.async().auth(new String(getFirstUri().getPassword()));
}
return connection;
}
/**
* Connect to a endpoint provided by {@code socketAddressSupplier} using connection settings (authentication, SSL) from
* {@code connectionSettings}.
*
*/
private void connectStateful(StatefulRedisConnectionImpl connection, DefaultEndpoint endpoint,
RedisURI connectionSettings, Supplier socketAddressSupplier,
Supplier commandHandlerSupplier) {
connectStateful0(connection, endpoint, connectionSettings, socketAddressSupplier, commandHandlerSupplier);
if (connectionSettings.getPassword() != null && connectionSettings.getPassword().length != 0) {
connection.async().auth(new String(connectionSettings.getPassword()));
}
}
/**
* Connect to a endpoint provided by {@code socketAddressSupplier} using connection settings (authentication, SSL) from
* {@code connectionSettings}.
*
*/
private void connectStateful(DefaultEndpoint endpoint, StatefulRedisClusterConnectionImpl connection,
RedisURI connectionSettings, Supplier socketAddressSupplier,
Supplier commandHandlerSupplier) {
connectStateful0(connection, endpoint, connectionSettings, socketAddressSupplier, commandHandlerSupplier);
if (connectionSettings.getPassword() != null && connectionSettings.getPassword().length != 0) {
connection.async().auth(new String(connectionSettings.getPassword()));
}
}
/**
* Connect to a endpoint provided by {@code socketAddressSupplier} using connection settings (SSL) from {@code
* connectionSettings}.
*/
private void connectStateful0(RedisChannelHandler connection, DefaultEndpoint endpoint,
RedisURI connectionSettings, Supplier socketAddressSupplier,
Supplier commandHandlerSupplier) {
ConnectionBuilder connectionBuilder;
if (connectionSettings.isSsl()) {
SslConnectionBuilder sslConnectionBuilder = SslConnectionBuilder.sslConnectionBuilder();
sslConnectionBuilder.ssl(connectionSettings);
connectionBuilder = sslConnectionBuilder;
} else {
connectionBuilder = ConnectionBuilder.connectionBuilder();
}
connectionBuilder.reconnectionListener(new ReconnectEventListener(clusterTopologyRefreshScheduler));
connectionBuilder.clientOptions(clientOptions);
connectionBuilder.connection(connection);
connectionBuilder.clientResources(clientResources);
connectionBuilder.endpoint(endpoint);
connectionBuilder.commandHandler(commandHandlerSupplier);
connectionBuilder(socketAddressSupplier, connectionBuilder, connectionSettings);
channelType(connectionBuilder, connectionSettings);
initializeChannel(connectionBuilder);
}
/**
* Reload partitions and re-initialize the distribution table.
*/
public void reloadPartitions() {
if (partitions == null) {
initializePartitions();
partitions.updateCache();
} else {
Partitions loadedPartitions = loadPartitions();
if (TopologyComparators.isChanged(getPartitions(), loadedPartitions)) {
logger.debug("Using a new cluster topology");
List before = new ArrayList(getPartitions());
List after = new ArrayList(loadedPartitions);
getResources().eventBus().publish(new ClusterTopologyChangedEvent(before, after));
}
this.partitions.reload(loadedPartitions.getPartitions());
}
updatePartitionsInConnections();
}
protected void updatePartitionsInConnections() {
forEachClusterConnection(input -> {
input.setPartitions(partitions);
});
}
protected void initializePartitions() {
Partitions loadedPartitions = loadPartitions();
this.partitions = loadedPartitions;
}
/**
* Retrieve the cluster view. Partitions are shared amongst all connections opened by this client instance.
*
* @return the partitions.
*/
public Partitions getPartitions() {
if (partitions == null) {
initializePartitions();
}
return partitions;
}
/**
* Retrieve partitions. Nodes within {@link Partitions} are ordered by latency. Lower latency nodes come first.
*
* @return Partitions
*/
protected Partitions loadPartitions() {
Iterable topologyRefreshSource = getTopologyRefreshSource();
Map partitions = refresh.loadViews(topologyRefreshSource, useDynamicRefreshSources());
if (partitions.isEmpty()) {
throw new RedisException("Cannot retrieve initial cluster partitions from initial URIs " + topologyRefreshSource);
}
Partitions loadedPartitions = determinePartitions(this.partitions, partitions);
RedisURI viewedBy = refresh.getViewedBy(partitions, loadedPartitions);
for (RedisClusterNode partition : loadedPartitions) {
if (viewedBy != null) {
RedisURI uri = partition.getUri();
applyUriConnectionSettings(viewedBy, uri);
}
}
activateTopologyRefreshIfNeeded();
return loadedPartitions;
}
/**
* Determines a {@link Partitions topology view} based on the current and the obtain topology views.
*
* @param current the current topology view. May be {@literal null} if {@link RedisClusterClient} has no topology view yet.
* @param topologyViews the obtain topology views
* @return the {@link Partitions topology view} to use.
*/
protected Partitions determinePartitions(Partitions current, Map topologyViews) {
if (current == null) {
return PartitionsConsensus.HEALTHY_MAJORITY.getPartitions(null, topologyViews);
}
return PartitionsConsensus.KNOWN_MAJORITY.getPartitions(current, topologyViews);
}
private void activateTopologyRefreshIfNeeded() {
if (getOptions() instanceof ClusterClientOptions) {
ClusterClientOptions options = (ClusterClientOptions) getOptions();
ClusterTopologyRefreshOptions topologyRefreshOptions = options.getTopologyRefreshOptions();
if (!topologyRefreshOptions.isPeriodicRefreshEnabled() || clusterTopologyRefreshActivated.get()) {
return;
}
if (clusterTopologyRefreshActivated.compareAndSet(false, true)) {
ScheduledFuture> scheduledFuture = genericWorkerPool.scheduleAtFixedRate(clusterTopologyRefreshScheduler,
options.getRefreshPeriod(), options.getRefreshPeriod(), options.getRefreshPeriodUnit());
clusterTopologyRefreshFuture.set(scheduledFuture);
}
}
}
protected RedisURI getFirstUri() {
assertNotEmpty(initialUris);
Iterator iterator = initialUris.iterator();
return iterator.next();
}
/**
* Returns a {@link Supplier} for {@link SocketAddress connection points}.
*
* @param sortFunction Sort function to enforce a specific order. The sort function must not change the order or the input
* parameter but create a new collection with the desired order, must not be {@literal null}.
* @return {@link Supplier} for {@link SocketAddress connection points}.
*/
protected Supplier getSocketAddressSupplier(
Function> sortFunction) {
LettuceAssert.notNull(sortFunction, "Sort function must not be null");
final RoundRobinSocketAddressSupplier socketAddressSupplier = new RoundRobinSocketAddressSupplier(partitions,
sortFunction, clientResources);
return () -> {
if (partitions.isEmpty()) {
SocketAddress socketAddress = SocketAddressResolver.resolve(getFirstUri(), clientResources.dnsResolver());
logger.debug("Resolved SocketAddress {} using {}", socketAddress, getFirstUri());
return socketAddress;
}
return socketAddressSupplier.get();
};
}
protected RedisCodec newStringStringCodec() {
return StringCodec.UTF8;
}
/**
* Sets the new cluster topology. The partitions are not applied to existing connections.
*
* @param partitions partitions object
*/
public void setPartitions(Partitions partitions) {
this.partitions = partitions;
}
/**
* Returns the {@link ClientResources} which are used with that client.
*
* @return the {@link ClientResources} for this client
*/
public ClientResources getResources() {
return clientResources;
}
/**
* Shutdown this client and close all open connections. The client should be discarded after calling shutdown.
*
* @param quietPeriod the quiet period as described in the documentation
* @param timeout the maximum amount of time to wait until the executor is shutdown regardless if a task was submitted
* during the quiet period
* @param timeUnit the unit of {@code quietPeriod} and {@code timeout}
*/
@Override
public void shutdown(long quietPeriod, long timeout, TimeUnit timeUnit) {
if (clusterTopologyRefreshActivated.compareAndSet(true, false)) {
ScheduledFuture> scheduledFuture = clusterTopologyRefreshFuture.get();
try {
scheduledFuture.cancel(false);
clusterTopologyRefreshFuture.set(null);
} catch (Exception e) {
logger.debug("Could not unschedule Cluster topology refresh", e);
}
}
super.shutdown(quietPeriod, timeout, timeUnit);
}
protected void forEachClusterConnection(Consumer> function) {
forEachCloseable(input -> input instanceof StatefulRedisClusterConnectionImpl, function);
}
protected void forEachCloseable(Predicate super Closeable> selector, Consumer function) {
for (Closeable c : closeableResources) {
if (selector.test(c)) {
function.accept((T) c);
}
}
}
/**
* Set the {@link ClusterClientOptions} for the client.
*
* @param clientOptions client options for the client and connections that are created after setting the options
*/
public void setOptions(ClusterClientOptions clientOptions) {
super.setOptions(clientOptions);
}
/**
* Returns the initial {@link RedisURI URIs}.
*
* @return the initial {@link RedisURI URIs}
*/
protected Iterable getInitialUris() {
return initialUris;
}
ClusterClientOptions getClusterClientOptions() {
if (getOptions() instanceof ClusterClientOptions) {
return (ClusterClientOptions) getOptions();
}
return null;
}
boolean expireStaleConnections() {
return getClusterClientOptions() == null || getClusterClientOptions().isCloseStaleConnections();
}
static void applyUriConnectionSettings(RedisURI from, RedisURI to) {
if (from.getPassword() != null && from.getPassword().length != 0) {
to.setPassword(new String(from.getPassword()));
}
to.setTimeout(from.getTimeout());
to.setUnit(from.getUnit());
to.setSsl(from.isSsl());
to.setStartTls(from.isStartTls());
to.setVerifyPeer(from.isVerifyPeer());
}
private static void assertNotNull(RedisCodec codec) {
LettuceAssert.notNull(codec, "RedisCodec must not be null");
}
private static void assertNotEmpty(Iterable redisURIs) {
LettuceAssert.notNull(redisURIs, "RedisURIs must not be null");
LettuceAssert.isTrue(redisURIs.iterator().hasNext(), "RedisURIs must not be empty");
}
private static RedisURI assertNotNull(RedisURI redisURI) {
LettuceAssert.notNull(redisURI, "RedisURI must not be null");
return redisURI;
}
private static void assertNotNull(ClientResources clientResources) {
LettuceAssert.notNull(clientResources, "ClientResources must not be null");
}
protected Iterable getTopologyRefreshSource() {
boolean initialSeedNodes = !useDynamicRefreshSources();
Iterable seed;
if (initialSeedNodes || partitions == null || partitions.isEmpty()) {
seed = RedisClusterClient.this.initialUris;
} else {
List uris = new ArrayList<>();
for (RedisClusterNode partition : TopologyComparators.sortByUri(partitions)) {
uris.add(partition.getUri());
}
seed = uris;
}
return seed;
}
protected boolean useDynamicRefreshSources() {
if (getClusterClientOptions() != null) {
ClusterTopologyRefreshOptions topologyRefreshOptions = getClusterClientOptions().getTopologyRefreshOptions();
return topologyRefreshOptions.useDynamicRefreshSources();
}
return true;
}
private class NodeConnectionFactoryImpl implements NodeConnectionFactory {
@Override
public StatefulRedisConnection connectToNode(RedisCodec codec, SocketAddress socketAddress) {
return RedisClusterClient.this.connectToNode(codec, socketAddress.toString(), null, new Supplier() {
@Override
public SocketAddress get() {
return socketAddress;
}
});
}
}
}