/**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.google.common.base.Strings;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.FatalConnectionException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.ZipUtils;
import com.netflix.dyno.jedis.JedisConnectionFactory.JedisConnection;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
import redis.clients.jedis.commands.BinaryRedisPipeline;
import redis.clients.jedis.commands.RedisPipeline;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
import javax.annotation.concurrent.NotThreadSafe;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import static com.netflix.dyno.connectionpool.ConnectionPoolConfiguration.CompressionStrategy;
@NotThreadSafe
public class DynoJedisPipeline implements RedisPipeline, BinaryRedisPipeline, AutoCloseable {
// Class-wide logger. NOTE(review): the field name shadows the org.slf4j.Logger type name.
private static final Logger Logger = LoggerFactory.getLogger(DynoJedisPipeline.class);
// ConnPool and connection to exec the pipeline
private final ConnectionPoolImpl connPool;
// Connection selected for this pipeline; assigned lazily by pipelined(...).
private volatile Connection connection;
// Per-pipeline operation metrics.
private final DynoJedisPipelineMonitor opMonitor;
// Connection-pool level metrics (operation success/failure counters).
private final ConnectionPoolMonitor cpMonitor;
// the cached pipeline
private volatile Pipeline jedisPipeline = null;
// the cached row key for the pipeline. all subsequent requests to pipeline
// must be the same. this is used to check that.
// NOTE(review): raw AtomicReference types — presumably AtomicReference<String>
// for theKey/hashtag and AtomicReference<byte[]> for theBinaryKey; confirm and parameterize.
private final AtomicReference theKey = new AtomicReference(null);
private final AtomicReference theBinaryKey = new AtomicReference(null);
// Hashtag value this pipeline is bound to when the pool configuration defines a hashtag.
private final AtomicReference hashtag = new AtomicReference(null);
// used for tracking errors
private final AtomicReference pipelineEx = new AtomicReference(null);
// Operation name reported to the connection pool for pipeline requests.
private static final String DynoPipeline = "DynoPipeline";
/**
 * Creates a pipeline bound to the given connection pool and monitors.
 *
 * @param cPool            pool used to obtain the connection the pipeline runs on
 * @param operationMonitor per-operation pipeline metrics
 * @param connPoolMonitor  connection-pool level metrics
 */
DynoJedisPipeline(ConnectionPoolImpl cPool, DynoJedisPipelineMonitor operationMonitor,
        ConnectionPoolMonitor connPoolMonitor) {
    this.cpMonitor = connPoolMonitor;
    this.opMonitor = operationMonitor;
    this.connPool = cPool;
}
/**
 * Selects a connection from the pool for the given binary key and opens a
 * Jedis pipeline on it.
 * <p>
 * The original code nested two identical try/catch blocks for
 * NoAvailableHostsException, so the rethrown exception was caught again by
 * the outer block: the failure counter was incremented twice and the
 * pipeline discarded twice. A single catch records the failure exactly once.
 *
 * @param key binary key used for host selection
 * @throws NoAvailableHostsException if no host can serve the operation
 */
private void pipelined(final byte[] key) {
    try {
        connection = connPool.getConnectionForOperation(new BaseOperation() {
            @Override
            public String getName() {
                return DynoPipeline;
            }

            @Override
            public String getStringKey() { // we do not use it in this context
                return null;
            }

            @Override
            public byte[] getBinaryKey() {
                return key;
            }
        });
    } catch (NoAvailableHostsException nahe) {
        // Record the failure once, clean up any partial state, then surface the error.
        cpMonitor.incOperationFailure(connection != null ? connection.getHost() : null, nahe);
        discardPipelineAndReleaseConnection();
        throw nahe;
    }
    Jedis jedis = ((JedisConnection) connection).getClient();
    jedisPipeline = jedis.pipelined();
    cpMonitor.incOperationSuccess(connection.getHost(), 0);
}
/**
 * Selects a connection from the pool for the given string key and opens a
 * Jedis pipeline on it.
 * <p>
 * The original code nested two identical try/catch blocks for
 * NoAvailableHostsException, causing the rethrown exception to be caught
 * again: failure was double-counted and the pipeline discarded twice. A
 * single catch records the failure exactly once.
 *
 * @param key string key used for host selection
 * @throws NoAvailableHostsException if no host can serve the operation
 */
private void pipelined(final String key) {
    try {
        connection = connPool.getConnectionForOperation(new BaseOperation() {
            @Override
            public String getName() {
                return DynoPipeline;
            }

            @Override
            public String getStringKey() {
                return key;
            }

            @Override
            public byte[] getBinaryKey() { // we do not use it in this context
                return null;
            }
        });
    } catch (NoAvailableHostsException nahe) {
        // Record the failure once, clean up any partial state, then surface the error.
        cpMonitor.incOperationFailure(connection != null ? connection.getHost() : null, nahe);
        discardPipelineAndReleaseConnection();
        throw nahe;
    }
    Jedis jedis = ((JedisConnection) connection).getClient();
    jedisPipeline = jedis.pipelined();
    cpMonitor.incOperationSuccess(connection.getHost(), 0);
}
/**
 * Binds this pipeline to a single hashtag value. The first caller wins the
 * race to set the hashtag and opens the pipeline on the corresponding key;
 * every later caller must present the same hashtag value.
 *
 * @param key          the full key (used to open the pipeline on first bind)
 * @param hashtagValue the hashtag value extracted from the key
 */
private void checkHashtag(final String key, final String hashtagValue) {
    // compareAndSet(null, v) succeeds only for the very first caller; a failed
    // CAS means a value is already bound (possibly by a racing thread).
    final boolean boundNow = this.hashtag.get() == null
            && this.hashtag.compareAndSet(null, hashtagValue);
    if (boundNow) {
        pipelined(key);
    } else {
        verifyHashtagValue(hashtagValue);
    }
}
/**
* Checks that a pipeline is associated with a single key. Binary keys do not
* support hashtags.
*
* @param key
*/
/**
 * Checks that a pipeline is associated with a single binary key. Binary keys
 * do not support hashtags. The first caller binds the key and opens the
 * pipeline; later callers must present the same key.
 *
 * @param key binary key for this pipeline request
 */
private void checkKey(final byte[] key) {
    if (theBinaryKey.compareAndSet(null, key)) {
        // First key seen by this pipeline: bind it and open the pipeline.
        pipelined(key);
    } else {
        // A key is already bound (possibly by a racing caller); it must match.
        verifyKey(key);
    }
}
/**
* Checks that a pipeline is associated with a single key. If there is a hashtag
* defined in the first host of the connectionpool then we check that first.
*
* @param key
*/
/**
 * Checks that a pipeline is associated with a single key. If a hashtag is
 * defined in the pool configuration, the hashtag value extracted from the key
 * identifies the pipeline instead of the full key.
 *
 * @param key string key for this pipeline request
 */
private void checkKey(final String key) {
    /*
     * The hashtag comes from the pool configuration: no connection has been
     * selected yet, so it cannot be read off a connection object. A connection
     * is selected based on the key or hashtag respectively.
     */
    final String hashtag = connPool.getConfiguration().getHashtag();
    if (hashtag == null || hashtag.isEmpty()) {
        // No hashtag configured: the whole key identifies the pipeline.
        if (theKey.compareAndSet(null, key)) {
            pipelined(key);
        } else {
            // Someone already bound a key; just verify it is the same one.
            verifyKey(key);
        }
        return;
    }
    /*
     * Dynomite defines a hashtag. Extract the value between the two hashtag
     * delimiter characters and use it as the pipeline's identity; fall back to
     * the full key when the delimiters are absent.
     */
    String hashValue = StringUtils.substringBetween(key,
            Character.toString(hashtag.charAt(0)),
            Character.toString(hashtag.charAt(1)));
    if (Strings.isNullOrEmpty(hashValue)) {
        hashValue = key;
    }
    checkHashtag(key, hashValue);
}
/**
* Verifies binary key with pipeline binary key
*/
/**
 * Verifies that the given binary key matches the key this pipeline is bound to.
 * <p>
 * Uses {@link Arrays#equals(byte[], byte[])} for content comparison — the
 * original {@code Object.equals} on arrays is reference equality, so an equal
 * key held in a distinct array instance was wrongly rejected. The message now
 * renders the key bytes instead of the array's identity hash.
 *
 * @param key binary key to verify
 * @throws RuntimeException if the keys differ; the pipeline is discarded first
 */
private void verifyKey(final byte[] key) {
    if (!Arrays.equals((byte[]) theBinaryKey.get(), key)) {
        try {
            throw new RuntimeException("Must have same key for Redis Pipeline in Dynomite. This key: "
                    + Arrays.toString(key));
        } finally {
            discardPipelineAndReleaseConnection();
        }
    }
}
/**
* Verifies key with pipeline key
*/
/**
 * Verifies that the given key matches the key this pipeline is bound to;
 * on mismatch the pipeline is discarded and a RuntimeException is thrown.
 *
 * @param key string key to verify
 */
private void verifyKey(final String key) {
    final Object boundKey = theKey.get();
    if (boundKey.equals(key)) {
        return;
    }
    try {
        throw new RuntimeException("Must have same key for Redis Pipeline in Dynomite. This key: " + key);
    } finally {
        // Release resources before the exception propagates.
        discardPipelineAndReleaseConnection();
    }
}
/**
 * Verifies that the given hashtag value matches the hashtag this pipeline is
 * bound to; on mismatch the pipeline is discarded and a RuntimeException is thrown.
 *
 * @param hashtagValue hashtag value to verify
 */
private void verifyHashtagValue(final String hashtagValue) {
    final Object boundTag = this.hashtag.get();
    if (boundTag.equals(hashtagValue)) {
        return;
    }
    try {
        throw new RuntimeException(
                "Must have same hashtag for Redis Pipeline in Dynomite. This hashvalue: " + hashtagValue);
    } finally {
        // Release resources before the exception propagates.
        discardPipelineAndReleaseConnection();
    }
}
/**
 * Returns the decompressed form of {@code value} when it is a compressed
 * base64 payload, otherwise the value unchanged. A decompression failure is
 * logged — with its cause, which the original dropped — and the raw value is
 * returned (best-effort semantics preserved).
 *
 * @param value possibly-compressed string value
 * @return decompressed value, or the original value on failure / when not compressed
 */
private String decompressValue(String value) {
    try {
        if (ZipUtils.isCompressed(value)) {
            return ZipUtils.decompressFromBase64String(value);
        }
    } catch (IOException e) {
        // Parameterized SLF4J logging; include the exception so the cause is retained.
        Logger.warn("Unable to decompress value [{}]", value, e);
    }
    return value;
}
/**
 * Returns the decompressed form of {@code value} when it is compressed,
 * otherwise the array unchanged. A decompression failure is logged — with its
 * cause, which the original dropped — and the raw bytes are returned.
 * The original log statement concatenated the byte[] itself, which prints the
 * array's identity hash ("[B@..."); the length is logged instead.
 *
 * @param value possibly-compressed binary value
 * @return decompressed bytes, or the original array on failure / when not compressed
 */
private byte[] decompressValue(byte[] value) {
    try {
        if (ZipUtils.isCompressed(value)) {
            return ZipUtils.decompressBytesNonBase64(value);
        }
    } catch (IOException e) {
        Logger.warn("Unable to decompress byte array value of length [{}]",
                value != null ? value.length : 0, e);
    }
    return value;
}
/**
* As long as jdk 7 and below is supported we need to define our own function
* interfaces
*/
private interface Func0 {
R call();
}
public class PipelineResponse extends Response {
private Response response;
public PipelineResponse(Builder b) {
super(BuilderFactory.STRING);
}
public PipelineResponse apply(Func0 extends Response> f) {
this.response = f.call();
return this;
}
@Override
public String get() {
return decompressValue(response.get());
}
}
public class PipelineLongResponse extends Response {
private Response response;
public PipelineLongResponse(Builder b) {
super(b);
}
public PipelineLongResponse apply(Func0 extends Response> f) {
this.response = f.call();
return this;
}
}
public class PipelineListResponse extends Response> {
private Response> response;
public PipelineListResponse(Builder b) {
super(BuilderFactory.STRING_LIST);
}
public PipelineListResponse apply(Func0 extends Response>> f) {
this.response = f.call();
return this;
}
@Override
public List get() {
return new ArrayList(
CollectionUtils.transform(response.get(), new CollectionUtils.Transform() {
@Override
public String get(String s) {
return decompressValue(s);
}
}));
}
}
public class PipelineBinaryResponse extends Response {
private Response response;
public PipelineBinaryResponse(Builder b) {
super(BuilderFactory.BYTE_ARRAY);
}
public PipelineBinaryResponse apply(Func0 extends Response> f) {
this.response = f.call();
return this;
}
@Override
public byte[] get() {
return decompressValue(response.get());
}
}
public class PipelineMapResponse extends Response