/*
 * Copyright 2011-Present, Redis Ltd. and Contributors
 * All rights reserved.
 *
 * Licensed under the MIT License.
 *
 * This file contains contributions from third-party contributors
 * licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.lettuce.core.cluster;

import static io.lettuce.core.cluster.ClusterScanSupport.*;
import static io.lettuce.core.cluster.models.partitions.RedisClusterNode.NodeFlag.*;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import io.lettuce.core.json.JsonParser;
import org.reactivestreams.Publisher;

import io.lettuce.core.*;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.reactive.RedisKeyReactiveCommands;
import io.lettuce.core.api.reactive.RedisScriptingReactiveCommands;
import io.lettuce.core.api.reactive.RedisServerReactiveCommands;
import io.lettuce.core.cluster.api.StatefulRedisClusterConnection;
import io.lettuce.core.cluster.api.reactive.RedisAdvancedClusterReactiveCommands;
import io.lettuce.core.cluster.api.reactive.RedisClusterReactiveCommands;
import io.lettuce.core.cluster.models.partitions.Partitions;
import io.lettuce.core.cluster.models.partitions.RedisClusterNode;
import io.lettuce.core.codec.RedisCodec;
import io.lettuce.core.internal.LettuceLists;
import io.lettuce.core.output.KeyStreamingChannel;
import io.lettuce.core.output.KeyValueStreamingChannel;
import io.lettuce.core.protocol.ConnectionIntent;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

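// Usage sketch (not part of the original source; host/port are illustrative placeholders): instances of this class are
// created by the client infrastructure rather than by application code. A typical way to reach this reactive API,
// assuming a cluster node listening on localhost:7000, looks like this:
//
//     RedisClusterClient clusterClient = RedisClusterClient.create("redis://localhost:7000");
//     StatefulRedisClusterConnection<String, String> connection = clusterClient.connect();
//     RedisAdvancedClusterReactiveCommands<String, String> reactive = connection.reactive();
//
//     reactive.set("key", "value").block();
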
/**
 * An advanced reactive and thread-safe API to a Redis Cluster connection.
 *
 * @param <K> Key type.
 * @param <V> Value type.
 * @author Mark Paluch
 * @author Jon Chambers
 * @since 4.0
 */
public class RedisAdvancedClusterReactiveCommandsImpl<K, V> extends AbstractRedisReactiveCommands<K, V>
        implements RedisAdvancedClusterReactiveCommands<K, V> {

    private static final Predicate<RedisClusterNode> ALL_NODES = node -> true;

    private final RedisCodec<K, V> codec;

    /**
     * Initialize a new connection.
     *
     * @param connection the stateful connection.
     * @param codec Codec used to encode/decode keys and values.
     * @deprecated since 5.2, use
     *             {@link #RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnection, RedisCodec, Mono)}.
     */
    @Deprecated
    public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnectionImpl<K, V> connection, RedisCodec<K, V> codec,
            Mono<JsonParser> parser) {
        super(connection, codec, parser);
        this.codec = codec;
    }

    /**
     * Initialize a new connection.
     *
     * @param connection the stateful connection.
     * @param codec Codec used to encode/decode keys and values.
     */
    public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnection<K, V> connection, RedisCodec<K, V> codec,
            Mono<JsonParser> parser) {
        super(connection, codec, parser);
        this.codec = codec;
    }

    @Override
    public Mono<String> clientSetname(K name) {

        List<Publisher<String>> publishers = new ArrayList<>();

        publishers.add(super.clientSetname(name));

        for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {

            Mono<RedisClusterReactiveCommands<K, V>> byNodeId = getConnectionReactive(redisClusterNode.getNodeId());

            publishers.add(byNodeId.flatMap(conn -> {

                if (conn.isOpen()) {
                    return conn.clientSetname(name);
                }
                return Mono.empty();
            }));

            Mono<RedisClusterReactiveCommands<K, V>> byHost = getConnectionReactive(redisClusterNode.getUri().getHost(),
                    redisClusterNode.getUri().getPort());

            publishers.add(byHost.flatMap(conn -> {

                if (conn.isOpen()) {
                    return conn.clientSetname(name);
                }
                return Mono.empty();
            }));
        }

        return Flux.merge(publishers).last();
    }

    @Override
    public Mono<Long> clusterCountKeysInSlot(int slot) {

        Mono<RedisClusterReactiveCommands<K, V>> connectionBySlot = findConnectionBySlotReactive(slot);
        return connectionBySlot.flatMap(cmd -> cmd.clusterCountKeysInSlot(slot));
    }

    @Override
    public Flux<K> clusterGetKeysInSlot(int slot, int count) {

        Mono<RedisClusterReactiveCommands<K, V>> connectionBySlot = findConnectionBySlotReactive(slot);
        return connectionBySlot.flatMapMany(conn -> conn.clusterGetKeysInSlot(slot, count));
    }

    @Override
    public Mono<Long> dbsize() {

        Map<String, Publisher<Long>> publishers = executeOnUpstream(RedisServerReactiveCommands::dbsize);
        return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<Long> del(K... keys) {
        return del(Arrays.asList(keys));
    }

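    // del, exists, touch and unlink below share the same fan-out pattern for multi-key commands: SlotHash.partition
    // groups the keys by hash slot, one command is issued per slot group, and the per-group counts are summed. A
    // minimal sketch of the underlying slot computation (the key is an illustrative placeholder):
    //
    //     int slot = SlotHash.getSlot("user:1000"); // CRC16 of the key, modulo 16384
    //
    // Keys sharing a hash tag, e.g. "{user:1000}.name" and "{user:1000}.email", map to the same slot and therefore
    // end up in a single partition (and a single command).
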
    @Override
    public Mono<Long> del(Iterable<K> keys) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.del(keys);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.del(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<Long> exists(K... keys) {
        return exists(Arrays.asList(keys));
    }

    public Mono<Long> exists(Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.exists(keyList);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.exists(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<String> flushall() {

        Map<String, Publisher<String>> publishers = executeOnUpstream(RedisServerReactiveCommands::flushall);
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> flushall(FlushMode flushMode) {

        Map<String, Publisher<String>> publishers = executeOnUpstream(it -> it.flushall(flushMode));
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> flushallAsync() {

        Map<String, Publisher<String>> publishers = executeOnUpstream(RedisServerReactiveCommands::flushallAsync);
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> flushdb() {

        Map<String, Publisher<String>> publishers = executeOnUpstream(RedisServerReactiveCommands::flushdb);
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> flushdb(FlushMode flushMode) {

        Map<String, Publisher<String>> publishers = executeOnUpstream(it -> it.flushdb(flushMode));
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Flux<K> keys(K pattern) {

        Map<String, Publisher<K>> publishers = executeOnUpstream(commands -> commands.keys(pattern));
        return Flux.merge(publishers.values());
    }

    @Override
    public Mono<Long> keys(KeyStreamingChannel<K> channel, K pattern) {

        Map<String, Publisher<Long>> publishers = executeOnUpstream(commands -> commands.keys(channel, pattern));
        return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
    }

    @Override
    public Flux<KeyValue<K, V>> mget(K... keys) {
        return mget(Arrays.asList(keys));
    }

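    // mget must preserve the caller's key order even though the command is split per hash slot and the merged results
    // arrive as concatenated per-slot partitions. The collectList().map(...) step below re-assembles the flat result
    // list: for each partition it looks up the original index of every key and writes the partition's value there.
    // Illustration (hypothetical keys): for keys [a, b, c] where a and c land in one slot group and b in another, the
    // merged results [va, vc, vb] are re-ordered to [va, vb, vc].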
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public Flux<KeyValue<K, V>> mget(Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.mget(keyList);
        }

        List<Flux<KeyValue<K, V>>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.mget(entry.getValue()));
        }

        Flux<KeyValue<K, V>> fluxes = Flux.mergeSequential(publishers);

        Mono<List<KeyValue<K, V>>> map = fluxes.collectList().map(vs -> {

            KeyValue<K, V>[] values = new KeyValue[vs.size()];
            int offset = 0;
            for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {

                for (int i = 0; i < keyList.size(); i++) {

                    int index = entry.getValue().indexOf(keyList.get(i));
                    if (index == -1) {
                        continue;
                    }

                    values[i] = vs.get(offset + index);
                }

                offset += entry.getValue().size();
            }

            return Arrays.asList(values);
        });

        return map.flatMapIterable(keyValues -> keyValues);
    }

    @Override
    public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, K... keys) {
        return mget(channel, Arrays.asList(keys));
    }

    @Override
    public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.mget(channel, keyList);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.mget(channel, entry.getValue()));
        }

        return Flux.merge(publishers).reduce(Long::sum);
    }

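    // mset and msetnx delegate to pipeliningWithMap (defined further below), which splits the key-value map by hash
    // slot and issues one command per slot group. msetnx reduces the per-group Booleans with a logical AND, so it only
    // reports true if every group succeeded; unlike single-node MSETNX, this fan-out is not atomic across slots.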
    @Override
    public Mono<Boolean> msetnx(Map<K, V> map) {

        return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.msetnx(kvMap).flux(),
                booleanFlux -> booleanFlux).reduce((accu, next) -> accu && next);
    }

    @Override
    public Mono<String> mset(Map<K, V> map) {
        return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.mset(kvMap).flux(),
                booleanFlux -> booleanFlux).last();
    }

    @Override
    public Mono<K> randomkey() {

        Partitions partitions = getStatefulConnection().getPartitions();

        if (partitions.isEmpty()) {
            return super.randomkey();
        }

        int index = ThreadLocalRandom.current().nextInt(partitions.size());

        Mono<RedisClusterReactiveCommands<K, V>> connection = getConnectionReactive(partitions.getPartition(index).getNodeId());
        return connection.flatMap(RedisKeyReactiveCommands::randomkey);
    }

    @Override
    public Mono<String> scriptFlush() {
        Map<String, Publisher<String>> publishers = executeOnNodes(RedisScriptingReactiveCommands::scriptFlush, ALL_NODES);
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> scriptKill() {
        Map<String, Publisher<String>> publishers = executeOnNodes(RedisScriptingReactiveCommands::scriptKill, ALL_NODES);
        return Flux.merge(publishers.values()).onErrorReturn("OK").last();
    }

    @Override
    public Mono<String> scriptLoad(byte[] script) {
        Map<String, Publisher<String>> publishers = executeOnNodes((commands) -> commands.scriptLoad(script), ALL_NODES);
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<Void> shutdown(boolean save) {
        Map<String, Publisher<Void>> publishers = executeOnNodes(commands -> commands.shutdown(save), ALL_NODES);
        return Flux.merge(publishers.values()).then();
    }

    @Override
    public Mono<Long> touch(K... keys) {
        return touch(Arrays.asList(keys));
    }

    public Mono<Long> touch(Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.touch(keyList);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.touch(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<Long> unlink(K... keys) {
        return unlink(Arrays.asList(keys));
    }

    @Override
    public Mono<Long> unlink(Iterable<K> keys) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.unlink(keys);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.unlink(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public RedisClusterReactiveCommands<K, V> getConnection(String nodeId) {
        return getStatefulConnection().getConnection(nodeId).reactive();
    }

    private Mono<RedisClusterReactiveCommands<K, V>> getConnectionReactive(String nodeId) {
        return getMono(getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, nodeId))
                .map(StatefulRedisConnection::reactive);
    }

    @Override
    public RedisClusterReactiveCommands<K, V> getConnection(String host, int port) {
        return getStatefulConnection().getConnection(host, port).reactive();
    }

    private Mono<RedisClusterReactiveCommands<K, V>> getConnectionReactive(String host, int port) {
        return getMono(getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, host, port))
                .map(StatefulRedisConnection::reactive);
    }

    @Override
    public StatefulRedisClusterConnection<K, V> getStatefulConnection() {
        return (StatefulRedisClusterConnection<K, V>) super.getConnection();
    }

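    // The scan(...) variants below walk the keyspace of the whole cluster one node at a time: the returned cursor
    // remembers the remaining node ids plus the current node's continuation cursor (see the static clusterScan method
    // near the bottom of this class). A sketch of draining a full cluster scan, assuming the illustrative "reactive"
    // handle from the usage sketch at the top of this file:
    //
    //     Flux<String> allKeys = reactive.scan()
    //             .expand(cursor -> cursor.isFinished() ? Mono.empty() : reactive.scan(cursor))
    //             .concatMapIterable(KeyScanCursor::getKeys);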
    @Override
    public Mono<KeyScanCursor<K>> scan() {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(), reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<KeyScanCursor<K>> scan(ScanArgs scanArgs) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(scanArgs),
                reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor, ScanArgs scanArgs) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(cursor, scanArgs),
                reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor) {
        return clusterScan(scanCursor, RedisKeyReactiveCommands::scan, reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(channel),
                reactiveClusterStreamScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanArgs scanArgs) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(channel, scanArgs),
                reactiveClusterStreamScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor, ScanArgs scanArgs) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(channel, cursor, scanArgs),
                reactiveClusterStreamScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(channel, cursor),
                reactiveClusterStreamScanCursorMapper());
    }

    @SuppressWarnings("unchecked")
    private <T extends ScanCursor> Mono<T> clusterScan(ScanCursor cursor,
            BiFunction<RedisKeyReactiveCommands<K, V>, ScanCursor, Mono<T>> scanFunction,
            ClusterScanSupport.ScanCursorMapper<Mono<T>> resultMapper) {

        return clusterScan(getStatefulConnection(), getConnectionProvider(), cursor, scanFunction,
                (ClusterScanSupport.ScanCursorMapper) resultMapper);
    }

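    // pipeliningWithMap is the map-valued counterpart of the multi-key fan-out used by del/exists/touch/unlink above:
    // the key set is partitioned by hash slot, a sub-map is built per slot group, and the supplied function is invoked
    // once per sub-map. resultFunction gives callers a hook to post-process the merged Flux; the mset/msetnx callers
    // above pass it through unchanged.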
    private <T> Flux<T> pipeliningWithMap(Map<K, V> map, Function<Map<K, V>, Flux<T>> function,
            Function<Flux<T>, Flux<T>> resultFunction) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, map.keySet());

        if (partitioned.size() < 2) {
            return function.apply(map);
        }

        List<Flux<T>> publishers = partitioned.values().stream().map(ks -> {
            Map<K, V> op = new HashMap<>();
            ks.forEach(k -> op.put(k, map.get(k)));
            return function.apply(op);
        }).collect(Collectors.toList());

        return resultFunction.apply(Flux.merge(publishers));
    }

    /**
     * Run a command on all available upstream (master) nodes.
     *
     * @param function function producing the command
     * @param <T> result type
     * @return map of node ids to the resulting publishers.
     */
    protected <T> Map<String, Publisher<T>> executeOnUpstream(
            Function<RedisClusterReactiveCommands<K, V>, ? extends Publisher<T>> function) {
        return executeOnNodes(function, redisClusterNode -> redisClusterNode.is(UPSTREAM));
    }

    /**
     * Run a command on all available nodes that match {@code filter}.
     *
     * @param function function producing the command
     * @param filter filter function for the node selection
     * @param <T> result type
     * @return map of node ids to the resulting publishers.
     */
    protected <T> Map<String, Publisher<T>> executeOnNodes(
            Function<RedisClusterReactiveCommands<K, V>, ? extends Publisher<T>> function, Predicate<RedisClusterNode> filter) {

        Map<String, Publisher<T>> executions = new HashMap<>();

        for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {

            if (!filter.test(redisClusterNode)) {
                continue;
            }

            RedisURI uri = redisClusterNode.getUri();
            Mono<RedisClusterReactiveCommands<K, V>> connection = getConnectionReactive(uri.getHost(), uri.getPort());

            executions.put(redisClusterNode.getNodeId(), connection.flatMapMany(function::apply));
        }
        return executions;
    }

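    // A sketch of how a subclass might use executeOnNodes with a custom filter, here selecting replicas only (REPLICA
    // comes from the NodeFlag static import at the top of this file; the command choice is illustrative):
    //
    //     Map<String, Publisher<String>> infoPerReplica =
    //             executeOnNodes(commands -> commands.info("replication"), node -> node.is(REPLICA));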
    private Mono<RedisClusterReactiveCommands<K, V>> findConnectionBySlotReactive(int slot) {

        RedisClusterNode node = getStatefulConnection().getPartitions().getPartitionBySlot(slot);
        if (node != null) {
            return getConnectionReactive(node.getUri().getHost(), node.getUri().getPort());
        }

        return Mono.error(new RedisException("No partition for slot " + slot));
    }

    private AsyncClusterConnectionProvider getConnectionProvider() {

        ClusterDistributionChannelWriter writer = (ClusterDistributionChannelWriter) getStatefulConnection().getChannelWriter();
        return (AsyncClusterConnectionProvider) writer.getClusterConnectionProvider();
    }

    /**
     * Perform a SCAN in the cluster by scanning the keyspace of each node sequentially. The cursor keeps track of the
     * node ids that remain to be scanned and of the continuation cursor on the node currently being scanned.
     */
    static <T extends ScanCursor, K, V> Mono<T> clusterScan(StatefulRedisClusterConnection<K, V> connection,
            AsyncClusterConnectionProvider connectionProvider, ScanCursor cursor,
            BiFunction<RedisKeyReactiveCommands<K, V>, ScanCursor, Mono<T>> scanFunction,
            ClusterScanSupport.ScanCursorMapper<Mono<T>> mapper) {

        List<String> nodeIds = ClusterScanSupport.getNodeIds(connection, cursor);
        String currentNodeId = ClusterScanSupport.getCurrentNodeId(cursor, nodeIds);
        ScanCursor continuationCursor = ClusterScanSupport.getContinuationCursor(cursor);

        Mono<T> scanCursor = getMono(connectionProvider.<K, V> getConnectionAsync(ConnectionIntent.WRITE, currentNodeId))
                .flatMap(conn -> scanFunction.apply(conn.reactive(), continuationCursor));
        return mapper.map(nodeIds, currentNodeId, scanCursor);
    }

    private static <T> Mono<T> getMono(CompletableFuture<T> future) {
        return Mono.fromCompletionStage(future);
    }

}