io.lettuce.core.cluster.RedisAdvancedClusterAsyncCommandsImpl Maven / Gradle / Ivy

Advanced and thread-safe Java Redis client for synchronous, asynchronous, and reactive usage. Supports Cluster, Sentinel, Pipelining, Auto-Reconnect, Codecs and much more.
/*
 * Copyright 2011-Present, Redis Ltd. and Contributors
 * All rights reserved.
 *
 * Licensed under the MIT License.
 *
 * This file contains contributions from third-party contributors
 * licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.lettuce.core.cluster;

import static io.lettuce.core.cluster.ClusterScanSupport.*;
import static io.lettuce.core.cluster.NodeSelectionInvocationHandler.ExecutionModel.*;
import static io.lettuce.core.cluster.models.partitions.RedisClusterNode.NodeFlag.*;

import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import io.lettuce.core.*;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.async.RedisAsyncCommands;
import io.lettuce.core.api.async.RedisKeyAsyncCommands;
import io.lettuce.core.api.async.RedisScriptingAsyncCommands;
import io.lettuce.core.api.async.RedisServerAsyncCommands;
import io.lettuce.core.cluster.api.NodeSelectionSupport;
import io.lettuce.core.cluster.api.StatefulRedisClusterConnection;
import io.lettuce.core.cluster.api.async.AsyncNodeSelection;
import io.lettuce.core.cluster.api.async.NodeSelectionAsyncCommands;
import io.lettuce.core.cluster.api.async.RedisAdvancedClusterAsyncCommands;
import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands;
import io.lettuce.core.cluster.models.partitions.Partitions;
import io.lettuce.core.cluster.models.partitions.RedisClusterNode;
import io.lettuce.core.codec.RedisCodec;
import io.lettuce.core.json.JsonParser;
import io.lettuce.core.json.JsonPath;
import io.lettuce.core.json.JsonValue;
import io.lettuce.core.json.arguments.JsonMsetArgs;
import io.lettuce.core.output.IntegerOutput;
import io.lettuce.core.output.KeyStreamingChannel;
import io.lettuce.core.output.KeyValueStreamingChannel;
import io.lettuce.core.protocol.AsyncCommand;
import io.lettuce.core.protocol.Command;
import io.lettuce.core.protocol.CommandType;
import io.lettuce.core.protocol.ConnectionIntent;
import reactor.core.publisher.Mono;

/**
 * An advanced asynchronous and thread-safe API for a Redis Cluster connection.
 *
 * @param <K> Key type.
 * @param <V> Value type.
 * @author Mark Paluch
 * @author Jon Chambers
 * @author Tihomir Mateev
 * @since 3.3
 */
@SuppressWarnings("unchecked")
public class RedisAdvancedClusterAsyncCommandsImpl<K, V> extends AbstractRedisAsyncCommands<K, V>
        implements RedisAdvancedClusterAsyncCommands<K, V> {

    private final RedisCodec<K, V> codec;
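
    /*
     * Usage sketch (illustrative only, not part of this class). Assuming a cluster reachable at redis://localhost
     * (hypothetical endpoint), the asynchronous cluster API backed by this implementation is typically obtained as follows:
     *
     *   RedisClusterClient clusterClient = RedisClusterClient.create("redis://localhost");
     *   StatefulRedisClusterConnection<String, String> connection = clusterClient.connect();
     *   RedisAdvancedClusterAsyncCommands<String, String> async = connection.async();
     *
     *   RedisFuture<String> set = async.set("key", "value");
     */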

    /**
     * Initialize a new connection.
     *
     * @param connection the stateful connection
     * @param codec Codec used to encode/decode keys and values.
     * @param parser the {@link JsonParser} to use, wrapped in a {@link Mono}.
     * @deprecated since 5.1, use
     *             {@link #RedisAdvancedClusterAsyncCommandsImpl(StatefulRedisClusterConnection, RedisCodec, Mono)}.
     */
    @Deprecated
    public RedisAdvancedClusterAsyncCommandsImpl(StatefulRedisClusterConnectionImpl<K, V> connection, RedisCodec<K, V> codec,
            Mono<JsonParser> parser) {
        super(connection, codec, parser);
        this.codec = codec;
    }

    /**
     * Initialize a new connection.
     *
     * @param connection the stateful connection
     * @param codec Codec used to encode/decode keys and values.
     * @param parser the {@link JsonParser} to use, wrapped in a {@link Mono}.
     */
    public RedisAdvancedClusterAsyncCommandsImpl(StatefulRedisClusterConnection<K, V> connection, RedisCodec<K, V> codec,
            Mono<JsonParser> parser) {
        super(connection, codec, parser);
        this.codec = codec;
    }

    @Override
    public RedisFuture<String> clientSetname(K name) {

        Map<String, CompletionStage<String>> executions = new HashMap<>();

        CompletableFuture<String> ok = CompletableFuture.completedFuture("OK");

        executions.put("Default", super.clientSetname(name).toCompletableFuture());

        for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {

            RedisURI uri = redisClusterNode.getUri();

            CompletableFuture<RedisClusterAsyncCommands<K, V>> byNodeId = getConnectionAsync(redisClusterNode.getNodeId());

            executions.put("NodeId: " + redisClusterNode.getNodeId(), byNodeId.thenCompose(c -> {

                if (c.isOpen()) {
                    return c.clientSetname(name);
                }
                return ok;
            }));

            CompletableFuture<RedisClusterAsyncCommands<K, V>> byHost = getConnectionAsync(uri.getHost(), uri.getPort());

            executions.put("HostAndPort: " + redisClusterNode.getNodeId(), byHost.thenCompose(c -> {

                if (c.isOpen()) {
                    return c.clientSetname(name);
                }
                return ok;
            }));
        }

        return MultiNodeExecution.firstOfAsync(executions);
    }

    @Override
    public RedisFuture<Long> clusterCountKeysInSlot(int slot) {

        RedisClusterAsyncCommands<K, V> connectionBySlot = findConnectionBySlot(slot);

        if (connectionBySlot != null) {
            return connectionBySlot.clusterCountKeysInSlot(slot);
        }

        return super.clusterCountKeysInSlot(slot);
    }

    @Override
    public RedisFuture<List<K>> clusterGetKeysInSlot(int slot, int count) {

        RedisClusterAsyncCommands<K, V> connectionBySlot = findConnectionBySlot(slot);

        if (connectionBySlot != null) {
            return connectionBySlot.clusterGetKeysInSlot(slot, count);
        }

        return super.clusterGetKeysInSlot(slot, count);
    }

    @Override
    public RedisFuture<Long> dbsize() {
        return MultiNodeExecution.aggregateAsync(executeOnUpstream(RedisServerAsyncCommands::dbsize));
    }

    @Override
    public RedisFuture<Long> del(K... keys) {
        return del(Arrays.asList(keys));
    }

    @Override
    public RedisFuture<Long> del(Iterable<K> keys) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.del(keys);
        }

        Map<Integer, RedisFuture<Long>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            RedisFuture<Long> del = super.del(entry.getValue());
            executions.put(entry.getKey(), del);
        }

        return MultiNodeExecution.aggregateAsync(executions);
    }
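
    /*
     * Illustrative example (assumes the "async" commands object from the usage sketch above): keys hashing to different
     * slots are deleted through per-slot DEL calls and the individual counts are summed, so callers still see one total:
     *
     *   RedisFuture<Long> deleted = async.del("user:1", "user:7", "order:42");
     */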

    @Override
    public RedisFuture<Long> exists(K... keys) {
        return exists(Arrays.asList(keys));
    }

    public RedisFuture<Long> exists(Iterable<K> keys) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.exists(keys);
        }

        Map<Integer, RedisFuture<Long>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            RedisFuture<Long> exists = super.exists(entry.getValue());
            executions.put(entry.getKey(), exists);
        }

        return MultiNodeExecution.aggregateAsync(executions);
    }

    @Override
    public RedisFuture<String> flushall() {
        return MultiNodeExecution.firstOfAsync(executeOnUpstream(RedisServerAsyncCommands::flushall));
    }

    @Override
    public RedisFuture<String> flushall(FlushMode flushMode) {
        return MultiNodeExecution.firstOfAsync(
                executeOnUpstream(kvRedisClusterAsyncCommands -> kvRedisClusterAsyncCommands.flushall(flushMode)));
    }

    @Override
    public RedisFuture<String> flushallAsync() {
        return MultiNodeExecution.firstOfAsync(executeOnUpstream(RedisServerAsyncCommands::flushallAsync));
    }

    @Override
    public RedisFuture<String> flushdb() {
        return MultiNodeExecution.firstOfAsync(executeOnUpstream(RedisServerAsyncCommands::flushdb));
    }

    @Override
    public RedisFuture<String> flushdb(FlushMode flushMode) {
        return MultiNodeExecution
                .firstOfAsync(executeOnUpstream(kvRedisClusterAsyncCommands -> kvRedisClusterAsyncCommands.flushdb(flushMode)));
    }

    @Override
    public RedisFuture<List<K>> keys(K pattern) {

        Map<String, CompletableFuture<List<K>>> executions = executeOnUpstream(commands -> commands.keys(pattern));

        return new PipelinedRedisFuture<>(executions, objectPipelinedRedisFuture -> {
            List<K> result = new ArrayList<>();
            for (CompletableFuture<List<K>> future : executions.values()) {
                result.addAll(MultiNodeExecution.execute(future::get));
            }
            return result;
        });
    }

    @Override
    public RedisFuture<Long> keys(KeyStreamingChannel<K> channel, K pattern) {

        Map<String, CompletableFuture<Long>> executions = executeOnUpstream(commands -> commands.keys(channel, pattern));
        return MultiNodeExecution.aggregateAsync(executions);
    }

    @Override
    public RedisFuture<List<JsonValue>> jsonMGet(JsonPath jsonPath, K... keys) {
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, Arrays.asList(keys));

        if (partitioned.size() < 2) {
            return super.jsonMGet(jsonPath, keys);
        }

        // For a given partition, maps the key to its index within the List in partitioned for faster lookups below
        Map<Integer, Map<K, Integer>> keysToIndexes = mapKeyToIndex(partitioned);
        Map<K, Integer> slots = SlotHash.getSlots(partitioned);
        Map<Integer, RedisFuture<List<JsonValue>>> executions = new HashMap<>(partitioned.size());

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            K[] partitionKeys = entry.getValue().toArray((K[]) new Object[entry.getValue().size()]);
            RedisFuture<List<JsonValue>> jsonMget = super.jsonMGet(jsonPath, partitionKeys);
            executions.put(entry.getKey(), jsonMget);
        }

        // restore order of keys
        return new PipelinedRedisFuture<>(executions, objectPipelinedRedisFuture -> {
            List<JsonValue> result = new ArrayList<>(slots.size());
            for (K opKey : keys) {
                int slot = slots.get(opKey);

                int position = keysToIndexes.get(slot).get(opKey);
                RedisFuture<List<JsonValue>> listRedisFuture = executions.get(slot);
                result.add(MultiNodeExecution.execute(() -> listRedisFuture.get().get(position)));
            }

            return result;
        });
    }

    private Map<Integer, Map<K, Integer>> mapKeyToIndex(Map<Integer, List<K>> partitioned) {
        Map<Integer, Map<K, Integer>> result = new HashMap<>(partitioned.size());
        for (Integer partition : partitioned.keySet()) {
            List<K> keysForPartition = partitioned.get(partition);
            Map<K, Integer> keysToIndexes = new HashMap<>(keysForPartition.size());
            for (int i = 0; i < keysForPartition.size(); i++) {
                keysToIndexes.put(keysForPartition.get(i), i);
            }
            result.put(partition, keysToIndexes);
        }

        return result;
    }

    @Override
    public RedisFuture<List<KeyValue<K, V>>> mget(K... keys) {
        return mget(Arrays.asList(keys));
    }

    @Override
    public RedisFuture<List<KeyValue<K, V>>> mget(Iterable<K> keys) {
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.mget(keys);
        }

        // For a given partition, maps the key to its index within the List in partitioned for faster lookups below
        Map<Integer, Map<K, Integer>> partitionedKeysToIndexes = mapKeyToIndex(partitioned);
        Map<K, Integer> slots = SlotHash.getSlots(partitioned);
        Map<Integer, RedisFuture<List<KeyValue<K, V>>>> executions = new HashMap<>(partitioned.size());

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            RedisFuture<List<KeyValue<K, V>>> mget = super.mget(entry.getValue());
            executions.put(entry.getKey(), mget);
        }

        // restore order of keys
        return new PipelinedRedisFuture<>(executions, objectPipelinedRedisFuture -> {
            List<KeyValue<K, V>> result = new ArrayList<>(slots.size());
            for (K opKey : keys) {
                int slot = slots.get(opKey);

                int position = partitionedKeysToIndexes.get(slot).get(opKey);
                RedisFuture<List<KeyValue<K, V>>> listRedisFuture = executions.get(slot);
                result.add(MultiNodeExecution.execute(() -> listRedisFuture.get().get(position)));
            }

            return result;
        });
    }

    @Override
    public RedisFuture<Long> mget(KeyValueStreamingChannel<K, V> channel, K... keys) {
        return mget(channel, Arrays.asList(keys));
    }

    @Override
    public RedisFuture<Long> mget(KeyValueStreamingChannel<K, V> channel, Iterable<K> keys) {
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.mget(channel, keys);
        }

        Map<Integer, RedisFuture<Long>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            RedisFuture<Long> mget = super.mget(channel, entry.getValue());
            executions.put(entry.getKey(), mget);
        }

        return MultiNodeExecution.aggregateAsync(executions);
    }
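
    /*
     * Illustrative example (assumes the "async" commands object from the usage sketch above): MGET is split by slot and the
     * per-slot results are stitched back together in the order the keys were requested, so the result list lines up with
     * the argument order even when the keys live on different nodes:
     *
     *   RedisFuture<List<KeyValue<String, String>>> values = async.mget("user:1", "user:7", "order:42");
     */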

    @Override
    public RedisFuture<String> jsonMSet(List<JsonMsetArgs<K, V>> arguments) {
        List<K> keys = arguments.stream().map(JsonMsetArgs::getKey).collect(Collectors.toList());
        Map<K, List<JsonMsetArgs<K, V>>> argsPerKey = arguments.stream().collect(Collectors.groupingBy(JsonMsetArgs::getKey));
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.jsonMSet(arguments);
        }

        Map<Integer, RedisFuture<String>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {

            List<JsonMsetArgs<K, V>> op = new ArrayList<>();
            entry.getValue().forEach(k -> op.addAll(argsPerKey.get(k)));

            RedisFuture<String> mset = super.jsonMSet(op);
            executions.put(entry.getKey(), mset);
        }

        return MultiNodeExecution.firstOfAsync(executions);
    }

    @Override
    public RedisFuture<String> mset(Map<K, V> map) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, map.keySet());

        if (partitioned.size() < 2) {
            return super.mset(map);
        }

        Map<Integer, RedisFuture<String>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {

            Map<K, V> op = new HashMap<>();
            entry.getValue().forEach(k -> op.put(k, map.get(k)));

            RedisFuture<String> mset = super.mset(op);
            executions.put(entry.getKey(), mset);
        }

        return MultiNodeExecution.firstOfAsync(executions);
    }

    @Override
    public RedisFuture<Boolean> msetnx(Map<K, V> map) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, map.keySet());

        if (partitioned.size() < 2) {
            return super.msetnx(map);
        }

        Map<Integer, RedisFuture<Boolean>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {

            Map<K, V> op = new HashMap<>();
            entry.getValue().forEach(k -> op.put(k, map.get(k)));

            RedisFuture<Boolean> msetnx = super.msetnx(op);
            executions.put(entry.getKey(), msetnx);
        }

        return new PipelinedRedisFuture<>(executions, objectPipelinedRedisFuture -> {

            for (RedisFuture<Boolean> listRedisFuture : executions.values()) {
                Boolean b = MultiNodeExecution.execute(() -> listRedisFuture.get());
                if (b == null || !b) {
                    return false;
                }
            }

            return !executions.isEmpty();
        });
    }
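
    /*
     * Note on cross-slot MSETNX (derived from the implementation above): the command is issued once per slot, so it is not
     * atomic across slots; the returned future yields true only if every per-slot MSETNX succeeded. Illustrative example,
     * assuming the "async" commands object from the usage sketch above:
     *
     *   Map<String, String> entries = new HashMap<>();
     *   entries.put("user:1", "alice");
     *   entries.put("order:42", "pending");
     *   RedisFuture<Boolean> allSet = async.msetnx(entries);
     */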

    @Override
    public RedisFuture<K> randomkey() {

        Partitions partitions = getStatefulConnection().getPartitions();

        if (partitions.isEmpty()) {
            return super.randomkey();
        }

        int index = ThreadLocalRandom.current().nextInt(partitions.size());
        RedisClusterNode partition = partitions.getPartition(index);

        CompletableFuture<K> future = getConnectionAsync(partition.getUri().getHost(), partition.getUri().getPort())
                .thenCompose(RedisKeyAsyncCommands::randomkey);

        return new PipelinedRedisFuture<>(future);
    }

    @Override
    public RedisFuture<String> scriptFlush() {

        Map<String, CompletableFuture<String>> executions = executeOnNodes(RedisScriptingAsyncCommands::scriptFlush,
                redisClusterNode -> true);
        return MultiNodeExecution.firstOfAsync(executions);
    }

    @Override
    public RedisFuture<String> scriptKill() {

        Map<String, CompletableFuture<String>> executions = executeOnNodes(RedisScriptingAsyncCommands::scriptKill,
                redisClusterNode -> true);
        return MultiNodeExecution.alwaysOkOfAsync(executions);
    }

    @Override
    public RedisFuture<String> scriptLoad(byte[] script) {

        Map<String, CompletableFuture<String>> executions = executeOnNodes(cmd -> cmd.scriptLoad(script),
                redisClusterNode -> true);
        return MultiNodeExecution.lastOfAsync(executions);
    }

    @Override
    public void shutdown(boolean save) {

        executeOnNodes(commands -> {
            commands.shutdown(save);

            Command<K, V, Long> command = new Command<>(CommandType.SHUTDOWN, new IntegerOutput<>(codec), null);
            AsyncCommand<K, V, Long> async = new AsyncCommand<>(command);
            async.complete();
            return async;
        }, redisClusterNode -> true);
    }

    @Override
    public RedisFuture<Long> touch(K... keys) {
        return touch(Arrays.asList(keys));
    }

    public RedisFuture<Long> touch(Iterable<K> keys) {
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.touch(keys);
        }

        Map<Integer, RedisFuture<Long>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            RedisFuture<Long> touch = super.touch(entry.getValue());
            executions.put(entry.getKey(), touch);
        }

        return MultiNodeExecution.aggregateAsync(executions);
    }

    @Override
    public RedisFuture<Long> unlink(K... keys) {
        return unlink(Arrays.asList(keys));
    }

    @Override
    public RedisFuture<Long> unlink(Iterable<K> keys) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.unlink(keys);
        }

        Map<Integer, RedisFuture<Long>> executions = new HashMap<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            RedisFuture<Long> unlink = super.unlink(entry.getValue());
            executions.put(entry.getKey(), unlink);
        }

        return MultiNodeExecution.aggregateAsync(executions);
    }

    @Override
    public RedisClusterAsyncCommands<K, V> getConnection(String nodeId) {
        return getStatefulConnection().getConnection(nodeId).async();
    }

    @Override
    public RedisClusterAsyncCommands<K, V> getConnection(String host, int port) {
        return getStatefulConnection().getConnection(host, port).async();
    }

    private CompletableFuture<RedisClusterAsyncCommands<K, V>> getConnectionAsync(String nodeId) {
        return getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, nodeId)
                .thenApply(StatefulRedisConnection::async);
    }

    private CompletableFuture<RedisClusterAsyncCommands<K, V>> getConnectionAsync(String host, int port) {
        return getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, host, port)
                .thenApply(StatefulRedisConnection::async);
    }

    @Override
    public StatefulRedisClusterConnection<K, V> getStatefulConnection() {
        return (StatefulRedisClusterConnection<K, V>) super.getConnection();
    }

    @Override
    public AsyncNodeSelection<K, V> nodes(Predicate<RedisClusterNode> predicate) {
        return nodes(predicate, false);
    }

    @Override
    public AsyncNodeSelection<K, V> readonly(Predicate<RedisClusterNode> predicate) {
        return nodes(predicate, ConnectionIntent.READ, false);
    }

    @Override
    public AsyncNodeSelection<K, V> nodes(Predicate<RedisClusterNode> predicate, boolean dynamic) {
        return nodes(predicate, ConnectionIntent.WRITE, dynamic);
    }

    @SuppressWarnings("unchecked")
    protected AsyncNodeSelection<K, V> nodes(Predicate<RedisClusterNode> predicate, ConnectionIntent connectionIntent,
            boolean dynamic) {

        NodeSelectionSupport<RedisAsyncCommands<K, V>, ?> selection;

        StatefulRedisClusterConnectionImpl<K, V> impl = (StatefulRedisClusterConnectionImpl<K, V>) getConnection();
        if (dynamic) {
            selection = new DynamicNodeSelection<RedisAsyncCommands<K, V>, Object, K, V>(
                    impl.getClusterDistributionChannelWriter(), predicate, connectionIntent, StatefulRedisConnection::async);
        } else {
            selection = new StaticNodeSelection<RedisAsyncCommands<K, V>, Object, K, V>(
                    impl.getClusterDistributionChannelWriter(), predicate, connectionIntent, StatefulRedisConnection::async);
        }

        NodeSelectionInvocationHandler h = new NodeSelectionInvocationHandler((AbstractNodeSelection<?, ?, ?, ?>) selection,
                RedisClusterAsyncCommands.class, ASYNC);
        return (AsyncNodeSelection<K, V>) Proxy.newProxyInstance(NodeSelectionSupport.class.getClassLoader(),
                new Class<?>[] { NodeSelectionAsyncCommands.class, AsyncNodeSelection.class }, h);
    }
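
    /*
     * Illustrative example of node selections (assumes the "async" commands object from the usage sketch above): the proxy
     * returned above runs a command on every matching node, for example on all upstream nodes:
     *
     *   AsyncNodeSelection<String, String> upstream = async.nodes(node -> node.is(RedisClusterNode.NodeFlag.UPSTREAM));
     *   AsyncExecutions<String> pings = upstream.commands().ping();
     */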

    @Override
    public RedisFuture<KeyScanCursor<K>> scan() {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(), asyncClusterKeyScanCursorMapper());
    }

    @Override
    public RedisFuture<KeyScanCursor<K>> scan(ScanArgs scanArgs) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(scanArgs),
                asyncClusterKeyScanCursorMapper());
    }

    @Override
    public RedisFuture<KeyScanCursor<K>> scan(ScanCursor scanCursor, ScanArgs scanArgs) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(cursor, scanArgs),
                asyncClusterKeyScanCursorMapper());
    }

    @Override
    public RedisFuture<KeyScanCursor<K>> scan(ScanCursor scanCursor) {
        return clusterScan(scanCursor, RedisKeyAsyncCommands::scan, asyncClusterKeyScanCursorMapper());
    }

    @Override
    public RedisFuture<StreamScanCursor> scan(KeyStreamingChannel<K> channel) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(channel),
                asyncClusterStreamScanCursorMapper());
    }

    @Override
    public RedisFuture<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanArgs scanArgs) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(channel, scanArgs),
                asyncClusterStreamScanCursorMapper());
    }

    @Override
    public RedisFuture<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor, ScanArgs scanArgs) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(channel, cursor, scanArgs),
                asyncClusterStreamScanCursorMapper());
    }

    @Override
    public RedisFuture<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(channel, cursor),
                asyncClusterStreamScanCursorMapper());
    }

    private <T extends ScanCursor> RedisFuture<T> clusterScan(ScanCursor cursor,
            BiFunction<RedisKeyAsyncCommands<K, V>, ScanCursor, RedisFuture<T>> scanFunction,
            ScanCursorMapper<RedisFuture<T>> resultMapper) {

        return clusterScan(getStatefulConnection(), cursor, scanFunction, resultMapper);
    }
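
    /*
     * Illustrative example of a cluster-wide scan (assumes the "async" commands object from the usage sketch above): the
     * returned cursor also carries the per-node scan state, so it can simply be fed back into scan(...) until it is
     * finished:
     *
     *   KeyScanCursor<String> cursor = async.scan().get();
     *   cursor.getKeys().forEach(System.out::println);
     *   while (!cursor.isFinished()) {
     *       cursor = async.scan(cursor).get();
     *       cursor.getKeys().forEach(System.out::println);
     *   }
     */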

    /**
     * Run a command on all available upstream (master) nodes.
     *
     * @param function function producing the command
     * @param <T> result type
     * @return map of node IDs to the command futures issued on each node.
     */
    protected <T> Map<String, CompletableFuture<T>> executeOnUpstream(
            Function<RedisClusterAsyncCommands<K, V>, RedisFuture<T>> function) {
        return executeOnNodes(function, redisClusterNode -> redisClusterNode.is(UPSTREAM));
    }

    /**
     * Run a command on all available nodes that match {@code filter}.
     *
     * @param function function producing the command
     * @param filter filter function for the node selection
     * @param <T> result type
     * @return map of node IDs to the command futures issued on each node.
     */
    protected <T> Map<String, CompletableFuture<T>> executeOnNodes(
            Function<RedisClusterAsyncCommands<K, V>, RedisFuture<T>> function, Function<RedisClusterNode, Boolean> filter) {
        Map<String, CompletableFuture<T>> executions = new HashMap<>();

        for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {

            if (!filter.apply(redisClusterNode)) {
                continue;
            }

            RedisURI uri = redisClusterNode.getUri();
            CompletableFuture<RedisClusterAsyncCommands<K, V>> connection = getConnectionAsync(uri.getHost(), uri.getPort());

            executions.put(redisClusterNode.getNodeId(), connection.thenCompose(function::apply));

        }
        return executions;
    }
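
    /*
     * Illustrative note: the two helpers above back the cluster-wide commands in this class (for example, dbsize() sums
     * the per-node results of executeOnUpstream). A hypothetical subclass could reuse executeOnNodes the same way, e.g. to
     * ping only replicas (assuming the REPLICA node flag is available in the running lettuce version):
     *
     *   Map<String, CompletableFuture<String>> pings = executeOnNodes(commands -> commands.ping(),
     *           node -> node.is(RedisClusterNode.NodeFlag.REPLICA));
     *   RedisFuture<String> firstPong = MultiNodeExecution.firstOfAsync(pings);
     */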

    private RedisClusterAsyncCommands<K, V> findConnectionBySlot(int slot) {
        RedisClusterNode node = getStatefulConnection().getPartitions().getPartitionBySlot(slot);
        if (node != null) {
            return getConnection(node.getUri().getHost(), node.getUri().getPort());
        }

        return null;
    }

    private AsyncClusterConnectionProvider getConnectionProvider() {

        ClusterDistributionChannelWriter writer = (ClusterDistributionChannelWriter) getStatefulConnection().getChannelWriter();
        return (AsyncClusterConnectionProvider) writer.getClusterConnectionProvider();
    }

    /**
     * Perform a SCAN in the cluster.
     */
    static <T extends ScanCursor, K, V> RedisFuture<T> clusterScan(StatefulRedisClusterConnection<K, V> connection,
            ScanCursor cursor, BiFunction<RedisKeyAsyncCommands<K, V>, ScanCursor, RedisFuture<T>> scanFunction,
            ScanCursorMapper<RedisFuture<T>> mapper) {

        List<String> nodeIds = ClusterScanSupport.getNodeIds(connection, cursor);
        String currentNodeId = ClusterScanSupport.getCurrentNodeId(cursor, nodeIds);
        ScanCursor continuationCursor = ClusterScanSupport.getContinuationCursor(cursor);

        RedisFuture<T> scanCursor = scanFunction.apply(connection.getConnection(currentNodeId).async(), continuationCursor);
        return mapper.map(nodeIds, currentNodeId, scanCursor);
    }

}



