// org.iherus.shiro.cache.redis.connection.AbstractRedisConnection (Maven / Gradle / Ivy artifact)
/**
* Copyright (c) 2016-2019, Bosco.Liao ([email protected]).
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.iherus.shiro.cache.redis.connection;
import static org.iherus.shiro.util.Utils.isBlank;
import static org.iherus.shiro.util.Utils.isEmpty;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* RedisConnection_super_class
*
* @author Bosco.Liao
* @since 2.0.0
*/
public abstract class AbstractRedisConnection {
private static final Logger logger = LoggerFactory.getLogger(AbstractRedisConnection.class);
public static final String EMPTY_STRING = "";
private static final String SERVER_VERSION_PATTERN = "redis_version:(.*?)\\r\\n";
private volatile static ExecutorService executor;
protected static ExecutorService defaultExecutor() {
if (executor == null) {
synchronized (AbstractRedisConnection.class) {
if (executor == null) {
executor = createDefaultExecutor();
}
}
}
return executor;
}
/**
* 创建默认线程池
*/
private static ExecutorService createDefaultExecutor() {
int coreThreads = Runtime.getRuntime().availableProcessors();
int queueSize = 2 << 9; // 1024
return new ThreadPoolExecutor(coreThreads, coreThreads * 2,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue(queueSize),
new ControllableRunsPolicy());
}
/**
* 解析当前Redis-Server版本,不支持Redis跨版本集群解析
*
* @param content redis-server-info
* @return Redis-Server 版本号
*/
protected String parseServerVersion(String content) {
if (isBlank(content)) return EMPTY_STRING;
Pattern pattern = Pattern.compile(SERVER_VERSION_PATTERN);
Matcher matcher = pattern.matcher(content);
try {
while (matcher.find()) {
return matcher.group(1);
}
} catch (Exception e) {
// ignored
}
return EMPTY_STRING;
}
/**
* 单机批量删除
*
* @param batchSize 每批次处理的数量
* @param keys 待处理的keys
* @param executor 执行删除动作的执行器
* @return 已删除的数量
*/
protected Long batchDeleteOnStandalone(int batchSize, byte[][] keys, Function executor) {
List keyList = Arrays.asList(keys);
long count = 0L;
int index = 0;
int batchCount = keys.length % batchSize == 0 ? (keys.length / batchSize) : (keys.length / batchSize + 1);
for (int i = 0; i < batchCount; i++) {
int batchSizeToUse = Math.min(keys.length - index, batchSize);
List batchKeyList = keyList.subList(index, index + batchSizeToUse);
index += batchSizeToUse;
byte[][] batchKeys = batchKeyList.toArray(new byte[batchKeyList.size()][]);
count += Optional.ofNullable(executor.apply(batchKeys)).orElse(0L).longValue();
}
return count;
}
/**
* 单机批量获取,分批次执行MGET操作
*
* @param batchSize 每批次处理的数量
* @param keys 待处理的keys
* @param executor 执行MGET动作的执行器
* @return 获取到的值集合
*/
protected List batchGetOnStandalone(int batchSize, byte[][] keys,
Function> executor) {
List keyList = Arrays.asList(keys);
int index = 0;
int batchCount = keys.length % batchSize == 0 ? (keys.length / batchSize) : (keys.length / batchSize + 1);
List values = new ArrayList(keys.length);
for (int i = 0; i < batchCount; i++) {
int batchSizeToUse = Math.min(keys.length - index, batchSize);
List batchKeyList = keyList.subList(index, index + batchSizeToUse);
index += batchSizeToUse;
byte[][] batchKeys = batchKeyList.toArray(new byte[batchKeyList.size()][]);
values.addAll(Optional.ofNullable(executor.apply(batchKeys)).orElse(Collections.emptyList()));
}
return Collections.unmodifiableList(values);
}
/**
* 集群批量删除
*
* @param batchSize 每批次处理的数量
* @param keys 待处理的keys
* @param executor 执行删除动作的执行器
* @param calculator slot计算器
* @return 已删除的数量
*/
protected Long batchDeleteOnCluster(int batchSize, byte[][] keys, Function executor,
Function calculator) {
Map> keysMap = getClassifiedKeys(calculator, keys);
if (keysMap.isEmpty()) return 0L;
final AtomicLong size = new AtomicLong(0L);
keysMap.forEach((slot, keySet) -> {
byte[][] keysOfSlot = keySet.toArray(new byte[keySet.size()][]);
size.getAndAdd(Optional.ofNullable(executor.apply(keysOfSlot)).orElse(0L).longValue());
});
return size.get();
}
/**
* 集群批量获取,按slot分类后,分批执行MGET操作
*
* @param batchSize 每批次处理的数量
* @param keys 待处理的keys
* @param executor 执行MGET动作的执行器
* @param calculator slot计算器
* @return 获取到的值集合
*/
protected List batchGetOnCluster(int batchSize, byte[][] keys, Function> executor,
Function calculator) {
Map> keysMap = getClassifiedKeys(calculator, keys);
if (keysMap.isEmpty())
return Collections.emptyList();
final List values = new ArrayList(keys.length);
keysMap.forEach((slot, keySet) -> {
byte[][] keysOfSlot = keySet.toArray(new byte[keySet.size()][]);
values.addAll(batchGetOnStandalone(batchSize, keysOfSlot, executor));
});
return Collections.unmodifiableList(values);
}
/**
* Keys安装slot分类
*
* @param calculator slot计算器
* @param keys 未分类的keys
* @return 已分类的keys
*/
protected Map> getClassifiedKeys(Function calculator, byte[]... keys) {
Map> keysMap = new HashMap>();
if (!isEmpty(keys)) {
for (byte[] key : keys) {
if (isEmpty(key))
continue;
int slot = Optional.ofNullable(calculator.apply(key)).orElse(0).intValue();
if (keysMap.containsKey(slot)) {
keysMap.get(slot).add(key);
} else {
keysMap.put(slot, new HashSet(Collections.singleton(key)));
}
}
}
return Collections.unmodifiableMap(keysMap);
}
/**
* 并发扫描
*
* @param forkExecutor 负责任务分发,最终需要返回已分发的任务总数
* @return 匹配扫描的全部Keys
*/
protected Set distributionScanKeys(Function>, Integer> forkExecutor) {
Set keys = new HashSet();
CompletionService> completionService = new ExecutorCompletionService>(getExecutor());
int taskSize = Optional.ofNullable(forkExecutor.apply(completionService)).orElse(0).intValue();
for (int i = 0; i < taskSize; i++) {
Set keysOfNode = Collections.emptySet();
try {
keysOfNode = completionService.take().get();
} catch (Exception e) {
logger.warn("Redis cluster's keys scan sub-threads cannot execute normally", e);
}
keys.addAll(keysOfNode);
}
return Collections.unmodifiableSet(keys);
}
/**
* 可自由定制
*/
protected ExecutorService getExecutor() {
return defaultExecutor();
}
/**
* @since 2.1.0
*/
static class ControllableRunsPolicy implements RejectedExecutionHandler {
private final float memoryBarrierFactor = 0.85f;
private final AtomicLong counter = new AtomicLong(0);
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
Runtime runtime = Runtime.getRuntime();
long barrier = (long) (runtime.maxMemory() * memoryBarrierFactor);
if (runtime.totalMemory() < barrier) {
Thread t = new Thread(r, "T-Shiro-Redis-Rejected-RunAlone-" + counter.incrementAndGet());
if (logger.isInfoEnabled()) {
logger.info("Thread [ {} ] runs in the reject policy.", t.getName());
}
t.start();
} else {
/**
* Executes task r in the caller's thread, unless the executorhas been shut
* down, in which case the task is discarded.
*/
if (!executor.isShutdown()) {
r.run();
}
}
}
}
}
// © 2015 - 2025 Weber Informatics LLC (artifact-browser footer, not part of the original source)