com.lambdaworks.redis.cluster.RedisAdvancedClusterReactiveCommandsImpl

Advanced and thread-safe Java Redis client for synchronous, asynchronous, and reactive usage. Supports Cluster, Sentinel, Pipelining, Auto-Reconnect, Codecs and much more.
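
A minimal usage sketch of the reactive cluster API implemented by the class below. This is illustrative only: the seed URI redis://localhost:7000, the class name, and the key are placeholder assumptions, and it presumes the com.lambdaworks 4.x/5.0-milestone API shown in the listing.

import com.lambdaworks.redis.RedisURI;
import com.lambdaworks.redis.cluster.RedisClusterClient;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
import com.lambdaworks.redis.cluster.api.reactive.RedisAdvancedClusterReactiveCommands;

public class ReactiveClusterExample {

    public static void main(String[] args) {

        // Any reachable node serves as a seed; the client discovers the remaining topology.
        RedisClusterClient client = RedisClusterClient.create(RedisURI.create("redis://localhost:7000"));
        StatefulRedisClusterConnection<String, String> connection = client.connect();
        RedisAdvancedClusterReactiveCommands<String, String> reactive = connection.reactive();

        // Reactive commands are lazy: nothing is sent until the publisher is subscribed.
        reactive.set("key", "value").block();
        reactive.get("key").subscribe(System.out::println);

        connection.close();
        client.shutdown();
    }
}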

/*
 * Copyright 2011-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lambdaworks.redis.cluster;

import static com.lambdaworks.redis.cluster.ClusterScanSupport.reactiveClusterKeyScanCursorMapper;
import static com.lambdaworks.redis.cluster.ClusterScanSupport.reactiveClusterStreamScanCursorMapper;
import static com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode.NodeFlag.MASTER;

import java.util.*;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.reactivestreams.Publisher;

import com.lambdaworks.redis.*;
import com.lambdaworks.redis.api.StatefulRedisConnection;
import com.lambdaworks.redis.api.reactive.RedisKeyReactiveCommands;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
import com.lambdaworks.redis.cluster.api.reactive.RedisAdvancedClusterReactiveCommands;
import com.lambdaworks.redis.cluster.api.reactive.RedisClusterReactiveCommands;
import com.lambdaworks.redis.cluster.models.partitions.Partitions;
import com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode;
import com.lambdaworks.redis.codec.RedisCodec;
import com.lambdaworks.redis.internal.LettuceLists;
import com.lambdaworks.redis.output.KeyStreamingChannel;
import com.lambdaworks.redis.output.KeyValueStreamingChannel;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

/**
 * An advanced reactive and thread-safe API to a Redis Cluster connection.
 *
 * @author Mark Paluch
 * @since 4.0
 */
public class RedisAdvancedClusterReactiveCommandsImpl<K, V> extends AbstractRedisReactiveCommands<K, V>
        implements RedisAdvancedClusterReactiveCommands<K, V> {

    private final Random random = new Random();

    /**
     * Initialize a new connection.
     *
     * @param connection the stateful connection
     * @param codec Codec used to encode/decode keys and values.
     */
    public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnectionImpl<K, V> connection,
            RedisCodec<K, V> codec) {
        super(connection, codec);
    }

    @Override
    public Mono<Long> del(K... keys) {
        return del(Arrays.asList(keys));
    }

    @Override
    public Mono<Long> del(Iterable<K> keys) {

        // Group the keys by hash slot; single-slot calls go straight through, multi-slot calls fan out.
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.del(keys);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.del(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<Long> unlink(K... keys) {
        return unlink(Arrays.asList(keys));
    }

    @Override
    public Mono<Long> unlink(Iterable<K> keys) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);

        if (partitioned.size() < 2) {
            return super.unlink(keys);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.unlink(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<Long> exists(K... keys) {
        return exists(Arrays.asList(keys));
    }

    public Mono<Long> exists(Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.exists(keyList);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.exists(entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Flux<KeyValue<K, V>> mget(K... keys) {
        return mget(Arrays.asList(keys));
    }

    @SuppressWarnings("unchecked")
    public Flux<KeyValue<K, V>> mget(Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.mget(keyList);
        }

        List<Publisher<KeyValue<K, V>>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.mget(entry.getValue()));
        }

        Flux<KeyValue<K, V>> fluxes = Flux.concat(publishers);

        // Results arrive grouped by slot; re-order them to match the order of the requested keys.
        Mono<List<KeyValue<K, V>>> map = fluxes.collectList().map(vs -> {

            KeyValue<K, V>[] values = new KeyValue[vs.size()];
            int offset = 0;
            for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {

                for (int i = 0; i < keyList.size(); i++) {

                    int index = entry.getValue().indexOf(keyList.get(i));
                    if (index == -1) {
                        continue;
                    }

                    values[i] = vs.get(offset + index);
                }

                offset += entry.getValue().size();
            }

            List<KeyValue<K, V>> objects = new ArrayList<>(Arrays.asList(values));
            return objects;
        });

        return map.flatMap(Flux::fromIterable);
    }

    @Override
    public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, K... keys) {
        return mget(channel, Arrays.asList(keys));
    }

    @Override
    public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.mget(channel, keyList);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.mget(channel, entry.getValue()));
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<Boolean> msetnx(Map<K, V> map) {

        return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.msetnx(kvMap).flux(),
                booleanFlux -> booleanFlux).reduce((accu, next) -> accu && next);
    }

    @Override
    public Mono<String> mset(Map<K, V> map) {
        return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.mset(kvMap).flux(),
                booleanFlux -> booleanFlux).last();
    }

    @Override
    public Flux<K> clusterGetKeysInSlot(int slot, int count) {
        RedisClusterReactiveCommands<K, V> connectionBySlot = findConnectionBySlot(slot);

        if (connectionBySlot != null) {
            return connectionBySlot.clusterGetKeysInSlot(slot, count);
        }

        return super.clusterGetKeysInSlot(slot, count);
    }

    @Override
    public Mono<Long> clusterCountKeysInSlot(int slot) {
        RedisClusterReactiveCommands<K, V> connectionBySlot = findConnectionBySlot(slot);

        if (connectionBySlot != null) {
            return connectionBySlot.clusterCountKeysInSlot(slot);
        }

        return super.clusterCountKeysInSlot(slot);
    }

    @Override
    public Mono<String> clientSetname(K name) {
        List<Publisher<String>> publishers = new ArrayList<>();

        for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {
            StatefulRedisConnection<K, V> byNodeId = getStatefulConnection().getConnection(redisClusterNode.getNodeId());
            if (byNodeId.isOpen()) {
                publishers.add(byNodeId.reactive().clientSetname(name));
            }

            StatefulRedisConnection<K, V> byHost = getStatefulConnection().getConnection(redisClusterNode.getUri().getHost(),
                    redisClusterNode.getUri().getPort());
            if (byHost.isOpen()) {
                publishers.add(byHost.reactive().clientSetname(name));
            }
        }

        return Flux.merge(publishers).last();
    }

    @Override
    public Mono<Long> dbsize() {
        Map<String, Publisher<Long>> publishers = executeOnMasters((commands) -> commands.dbsize().flux());
        return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<String> flushall() {
        Map<String, Publisher<String>> publishers = executeOnMasters(commands -> commands.flushall().flux());
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> flushdb() {
        Map<String, Publisher<String>> publishers = executeOnMasters((commands) -> commands.flushdb().flux());
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Flux<K> keys(K pattern) {
        Map<String, Publisher<K>> publishers = executeOnMasters(commands -> commands.keys(pattern));
        return Flux.merge(publishers.values());
    }

    @Override
    public Mono<Long> keys(KeyStreamingChannel<K> channel, K pattern) {
        Map<String, Publisher<Long>> publishers = executeOnMasters(commands -> commands.keys(channel, pattern).flux());
        return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
    }

    @Override
    public Mono<K> randomkey() {

        Partitions partitions = getStatefulConnection().getPartitions();
        int index = random.nextInt(partitions.size());

        RedisClusterReactiveCommands<K, V> connection = getConnection(partitions.getPartition(index).getNodeId());
        return connection.randomkey();
    }

    @Override
    public Mono<String> scriptFlush() {
        Map<String, Publisher<String>> publishers = executeOnNodes((commands) -> commands.scriptFlush().flux(),
                redisClusterNode -> true);
        return Flux.merge(publishers.values()).last();
    }

    @Override
    public Mono<String> scriptKill() {
        Map<String, Publisher<String>> publishers = executeOnNodes((commands) -> commands.scriptKill().flux(),
                redisClusterNode -> true);
        // SCRIPT KILL fails on nodes without a running script; treat those errors as success ("OK").
        return Flux.merge(publishers.values()).onErrorReturn("OK").last();
    }

    @Override
    public Mono<Void> shutdown(boolean save) {
        Map<String, Publisher<Void>> publishers = executeOnNodes(commands -> commands.shutdown(save).flux(),
                redisClusterNode -> true);
        return Flux.merge(publishers.values()).then();
    }

    @Override
    public Mono<Long> touch(K... keys) {
        return touch(Arrays.asList(keys));
    }

    public Mono<Long> touch(Iterable<K> keys) {

        List<K> keyList = LettuceLists.newList(keys);
        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);

        if (partitioned.size() < 2) {
            return super.touch(keyList);
        }

        List<Publisher<Long>> publishers = new ArrayList<>();

        for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
            publishers.add(super.touch(entry.getValue()).flux());
        }

        return Flux.merge(publishers).reduce((accu, next) -> accu + next);
    }

    /**
     * Run a command on all available masters.
     *
     * @param function function producing the command
     * @param <T> result type
     * @return map of node ids to the command result publishers.
     */
    protected <T> Map<String, Publisher<T>> executeOnMasters(Function<RedisClusterReactiveCommands<K, V>, Flux<T>> function) {
        return executeOnNodes(function, redisClusterNode -> redisClusterNode.is(MASTER));
    }

    /**
     * Run a command on all available nodes that match {@code filter}.
     *
     * @param function function producing the command
     * @param filter filter function for the node selection
     * @param <T> result type
     * @return map of node ids to the command result publishers.
     */
    protected <T> Map<String, Publisher<T>> executeOnNodes(Function<RedisClusterReactiveCommands<K, V>, Flux<T>> function,
            Function<RedisClusterNode, Boolean> filter) {
        Map<String, Publisher<T>> executions = new HashMap<>();

        for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {

            if (!filter.apply(redisClusterNode)) {
                continue;
            }

            RedisURI uri = redisClusterNode.getUri();
            StatefulRedisConnection<K, V> connection = getStatefulConnection().getConnection(uri.getHost(), uri.getPort());
            if (connection.isOpen()) {
                executions.put(redisClusterNode.getNodeId(), function.apply(connection.reactive()));
            }
        }
        return executions;
    }

    private RedisClusterReactiveCommands<K, V> findConnectionBySlot(int slot) {
        RedisClusterNode node = getStatefulConnection().getPartitions().getPartitionBySlot(slot);
        if (node != null) {
            return getConnection(node.getUri().getHost(), node.getUri().getPort());
        }

        return null;
    }

    @Override
    public StatefulRedisClusterConnection<K, V> getStatefulConnection() {
        return (StatefulRedisClusterConnection<K, V>) connection;
    }

    @Override
    public RedisClusterReactiveCommands<K, V> getConnection(String nodeId) {
        return getStatefulConnection().getConnection(nodeId).reactive();
    }

    @Override
    public RedisClusterReactiveCommands<K, V> getConnection(String host, int port) {
        return getStatefulConnection().getConnection(host, port).reactive();
    }

    @Override
    public Mono<KeyScanCursor<K>> scan() {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(), reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<KeyScanCursor<K>> scan(ScanArgs scanArgs) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(scanArgs),
                reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor, ScanArgs scanArgs) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(cursor, scanArgs),
                reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(cursor), reactiveClusterKeyScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(channel),
                reactiveClusterStreamScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanArgs scanArgs) {
        return clusterScan(ScanCursor.INITIAL, (connection, cursor) -> connection.scan(channel, scanArgs),
                reactiveClusterStreamScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor, ScanArgs scanArgs) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(channel, cursor, scanArgs),
                reactiveClusterStreamScanCursorMapper());
    }

    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor) {
        return clusterScan(scanCursor, (connection, cursor) -> connection.scan(channel, cursor),
                reactiveClusterStreamScanCursorMapper());
    }

    private <T extends ScanCursor> Mono<T> clusterScan(ScanCursor cursor,
            BiFunction<RedisKeyReactiveCommands<K, V>, ScanCursor, Mono<T>> scanFunction,
            ClusterScanSupport.ScanCursorMapper<Mono<T>> resultMapper) {

        return clusterScan(getStatefulConnection(), cursor, scanFunction, (ClusterScanSupport.ScanCursorMapper) resultMapper);
    }

    /**
     * Perform a SCAN in the cluster.
     */
    static <T extends ScanCursor, K, V> Mono<T> clusterScan(StatefulRedisClusterConnection<K, V> connection, ScanCursor cursor,
            BiFunction<RedisKeyReactiveCommands<K, V>, ScanCursor, Mono<T>> scanFunction,
            ClusterScanSupport.ScanCursorMapper<Mono<T>> mapper) {

        // A cluster-wide SCAN walks the nodes one at a time; the cursor carries the node list and the current node.
        List<String> nodeIds = ClusterScanSupport.getNodeIds(connection, cursor);
        String currentNodeId = ClusterScanSupport.getCurrentNodeId(cursor, nodeIds);
        ScanCursor continuationCursor = ClusterScanSupport.getContinuationCursor(cursor);

        Mono<T> scanCursor = scanFunction.apply(connection.getConnection(currentNodeId).reactive(), continuationCursor);
        return mapper.map(nodeIds, currentNodeId, scanCursor);
    }

    private <T> Flux<T> pipeliningWithMap(Map<K, V> map, Function<Map<K, V>, Flux<T>> function,
            Function<Flux<T>, Flux<T>> resultFunction) {

        Map<Integer, List<K>> partitioned = SlotHash.partition(codec, map.keySet());

        if (partitioned.size() < 2) {
            return function.apply(map);
        }

        List<Flux<T>> publishers = partitioned.values().stream().map(ks -> {
            Map<K, V> op = new HashMap<>();
            ks.forEach(k -> op.put(k, map.get(k)));
            return function.apply(op);
        }).collect(Collectors.toList());

        return resultFunction.apply(Flux.merge(publishers));
    }

}
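
The multi-key commands above (del, exists, mget, mset, msetnx, touch, unlink) partition their keys by hash slot and merge the per-node results, so cross-slot calls behave as they would against a single node. A minimal sketch of that behavior; the seed URI is an assumption as before, and the keys "a" and "b" are placeholders that typically hash to different slots:

import java.util.HashMap;
import java.util.Map;

import com.lambdaworks.redis.RedisURI;
import com.lambdaworks.redis.cluster.RedisClusterClient;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
import com.lambdaworks.redis.cluster.api.reactive.RedisAdvancedClusterReactiveCommands;

public class CrossSlotExample {

    public static void main(String[] args) {

        RedisClusterClient client = RedisClusterClient.create(RedisURI.create("redis://localhost:7000"));
        StatefulRedisClusterConnection<String, String> connection = client.connect();
        RedisAdvancedClusterReactiveCommands<String, String> reactive = connection.reactive();

        Map<String, String> values = new HashMap<>();
        values.put("a", "1"); // "a" and "b" usually map to different hash slots,
        values.put("b", "2"); // so this MSET is transparently split per slot owner

        reactive.mset(values).block();

        // MGET fans out per slot; results are re-ordered to match the requested key order.
        reactive.mget("a", "b").subscribe(System.out::println);

        connection.close();
        client.shutdown();
    }
}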



