// org.jgroups.blocks.ReplCache
package org.jgroups.blocks;
import org.jgroups.*;
import org.jgroups.annotations.ManagedAttribute;
import org.jgroups.annotations.ManagedOperation;
import org.jgroups.logging.Log;
import org.jgroups.logging.LogFactory;
import org.jgroups.util.*;
import java.io.*;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* Cache which allows for replication factors per data items; the factor determines how many replicas
* of a key/value we create across the cluster.
* See doc/design/ReplCache.txt for details.
* @author Bela Ban
*/
public class ReplCache implements Receiver, Cache.ChangeListener {
/** The cache in which all entries are located. The value is a tuple, consisting of the replication count and the
* actual value */
private Cache> l2_cache=new Cache<>();
/** The local bounded cache, to speed up access to frequently accessed entries. Can be disabled or enabled */
private Cache l1_cache=null;
private static final Log log=LogFactory.getLog(ReplCache.class);
private JChannel ch=null;
private Address local_addr;
private View view;
private RpcDispatcher disp;
@ManagedAttribute(writable=true)
private String props="udp.xml";
@ManagedAttribute(writable=true)
private String cluster_name="ReplCache-Cluster";
@ManagedAttribute(writable=true)
private long call_timeout=1000L;
@ManagedAttribute(writable=true)
private long caching_time=30000L; // in milliseconds. -1 means don't cache, 0 means cache forever (or until changed)
@ManagedAttribute
private short default_replication_count=1; // no replication by default
private HashFunction hash_function=null;
private HashFunctionFactory hash_function_factory=ConsistentHashFunction::new;
private final Set receivers=new HashSet<>();
private final Set change_listeners=new HashSet<>();
/** On a view change, if a member P1 detects that for any given key K, P1 is not the owner of K, then
* it will compute the new owner P2 and transfer ownership for all Ks for which P2 is the new owner. P1
* will then also evict those keys from its L2 cache */
@ManagedAttribute(writable=true)
private boolean migrate_data=true;
private static final short PUT = 1;
private static final short PUT_FORCE = 2;
private static final short GET = 3;
private static final short REMOVE = 4;
private static final short REMOVE_MANY = 5;
protected static final Map methods=Util.createConcurrentMap(8);
private TimeScheduler timer;
// Registers reflection handles for the methods invoked remotely through the RpcDispatcher.
// The short IDs (PUT..REMOVE_MANY) travel on the wire; methods::get (installed in start())
// maps them back to the local _put/_get/_remove/_removeMany handlers.
static {
try {
methods.put(PUT, ReplCache.class.getMethod("_put",
Object.class,
Object.class,
short.class,
long.class));
methods.put(PUT_FORCE, ReplCache.class.getMethod("_put",
Object.class,
Object.class,
short.class,
long.class, boolean.class));
methods.put(GET, ReplCache.class.getMethod("_get",
Object.class));
methods.put(REMOVE, ReplCache.class.getMethod("_remove", Object.class));
methods.put(REMOVE_MANY, ReplCache.class.getMethod("_removeMany", Set.class));
}
catch(NoSuchMethodException e) {
// fail fast at class-load time: a missing handler is a programming error, not a runtime condition
throw new RuntimeException(e);
}
}
/** Maps a key to the cluster nodes which should store replicas of that key. */
public interface HashFunction<K> {
    /**
     * Function that, given a key and a replication count, returns replication_count number of different
     * addresses of nodes.
     * @param key the key to locate
     * @param replication_count the number of distinct nodes to pick
     * @return the addresses of the nodes which should store the key
     */
    List<Address> hash(K key, short replication_count);

    /**
     * When the topology changes, this method will be called. Implementations will typically cache the node list.
     * @param nodes the current cluster membership
     */
    void installNodes(List<Address> nodes);
}
/** Creates HashFunction instances, e.g. on start() or when recomputing ownership on a view change. */
public interface HashFunctionFactory<K> {
    HashFunction<K> create();
}
/**
 * Creates a new ReplCache. The channel is not created until {@code start()} is called.
 * @param props channel configuration (e.g. "udp.xml")
 * @param cluster_name name of the cluster joined on start()
 */
public ReplCache(String props, String cluster_name) {
    this.cluster_name=cluster_name;
    this.props=props;
}
/** Returns the channel configuration string/file. */
public String getProps() {
return props;
}
/** Sets the channel configuration; takes effect on the next start(). */
public void setProps(String props) {
this.props=props;
}
/** Returns this member's address, or null if not yet connected. */
public Address getLocalAddress() {
return local_addr;
}
/** Returns this member's address as a string ("null" when not connected). */
@ManagedAttribute
public String getLocalAddressAsString() {
return local_addr != null? local_addr.toString() : "null";
}
/** Returns the current cluster view as a string ("null" when not connected). */
@ManagedAttribute
public String getView() {
return view != null? view.toString() : "null";
}
/** Returns the number of members in the current view (0 when not connected). */
@ManagedAttribute
public int getClusterSize() {
return view != null? view.size() : 0;
}
/** Whether a local L1 (front) cache is installed. */
@ManagedAttribute
public boolean isL1CacheEnabled() {
return l1_cache != null;
}
/** Returns the name of the cluster joined on start(). */
public String getClusterName() {
return cluster_name;
}
/** Sets the cluster name; takes effect on the next start(). */
public void setClusterName(String cluster_name) {
this.cluster_name=cluster_name;
}
/** Returns the timeout (ms) for cluster-wide calls. */
public long getCallTimeout() {
return call_timeout;
}
/** Sets the timeout (ms) for cluster-wide calls. */
public void setCallTimeout(long call_timeout) {
this.call_timeout=call_timeout;
}
/** Returns the default entry lifetime in ms (-1: don't cache, 0: cache forever). */
public long getCachingTime() {
return caching_time;
}
/** Sets the default entry lifetime in ms (-1: don't cache, 0: cache forever). */
public void setCachingTime(long caching_time) {
this.caching_time=caching_time;
}
/** Whether keys are transferred to their new owners on stop() / view changes. */
public boolean isMigrateData() {
return migrate_data;
}
/** Enables or disables data migration on stop() / view changes. */
public void setMigrateData(boolean migrate_data) {
this.migrate_data=migrate_data;
}
/** Returns the replication count used by put(K,V). */
public short getDefaultReplicationCount() {
return default_replication_count;
}
/** Sets the replication count used by put(K,V). */
public void setDefaultReplicationCount(short default_replication_count) {
this.default_replication_count=default_replication_count;
}
/** Returns the installed hash function (may be null before start()). */
public HashFunction<K> getHashFunction() {
    return hash_function;
}

/** Installs an explicit hash function. NOTE(review): start() overwrites this when a factory is set — confirm intended precedence. */
public void setHashFunction(HashFunction<K> hash_function) {
    this.hash_function=hash_function;
}

/** Returns the factory used to (re)create hash functions. */
public HashFunctionFactory<K> getHashFunctionFactory() {
    return hash_function_factory;
}

/** Sets the factory used to (re)create hash functions. */
public void setHashFunctionFactory(HashFunctionFactory<K> hash_function_factory) {
    this.hash_function_factory=hash_function_factory;
}
/** Registers a receiver. Presumably forwarded JChannel callbacks (e.g. view changes) — confirm against the receiver methods further down. */
public void addReceiver(Receiver r) {
receivers.add(r);
}
/** Unregisters a receiver. NOTE(review): name is asymmetric with addReceiver(); kept as-is for API compatibility. */
public void removeMembershipListener(Receiver r) {
receivers.remove(r);
}
/** Registers a listener to be notified of cache content changes. */
public void addChangeListener(ChangeListener l) {
change_listeners.add(l);
}
/** Unregisters a change listener. */
public void removeChangeListener(ChangeListener l) {
change_listeners.remove(l);
}
public Cache getL1Cache() {
return l1_cache;
}
public void setL1Cache(Cache cache) {
if(l1_cache != null)
l1_cache.stop();
l1_cache=cache;
}
public Cache> getL2Cache() {
return l2_cache;
}
public void setL2Cache(Cache> cache) {
if(cache != null) {
l2_cache.stop();
l2_cache=cache;
}
}
/**
 * Creates and connects the channel and RPC dispatcher and registers this instance as change
 * listener of the L2 cache. Must be called before any put/get/remove.
 * @throws Exception if the channel cannot be created or connected
 */
@ManagedOperation
public void start() throws Exception {
// a factory-created hash function takes precedence over an explicitly set one;
// fall back to a default ConsistentHashFunction if neither is available
if(hash_function_factory != null) {
hash_function=hash_function_factory.create();
}
if(hash_function == null)
hash_function=new ConsistentHashFunction<>();
// dispatcher must be wired up before connect() so no early messages are missed
ch=new JChannel(props);
disp=new RpcDispatcher(ch, this).setMethodLookup(methods::get).setReceiver(this);
ch.connect(cluster_name);
local_addr=ch.getAddress(); // valid only after connect()
view=ch.getView();
timer=ch.getProtocolStack().getTransport().getTimer();
l2_cache.addChangeListener(this);
}
@ManagedOperation
public void stop() {
if(l1_cache != null)
l1_cache.stop();
if(migrate_data) {
List members_without_me=new ArrayList<>(view.getMembers());
members_without_me.remove(local_addr);
HashFunction tmp_hash_function=hash_function_factory.create();
tmp_hash_function.installNodes(members_without_me);
for(Map.Entry>> entry: l2_cache.entrySet()) {
K key=entry.getKey();
Cache.Value> val=entry.getValue();
if(val == null)
continue;
Value tmp=val.getValue();
if(tmp == null)
continue;
short repl_count=tmp.getReplicationCount();
if(repl_count != 1) // we only handle keys which are not replicated and which are stored by us
continue;
List nodes=tmp_hash_function.hash(key, repl_count);
if(nodes == null || nodes.isEmpty())
continue;
if(!nodes.contains(local_addr)) {
Address dest=nodes.get(0); // should only have 1 element anyway
move(dest, key, tmp.getVal(), repl_count, val.getTimeout(), true);
_remove(key);
}
}
}
l2_cache.removeChangeListener(this);
l2_cache.stop();
disp.stop();
ch.close();
}
/**
 * Places a key/value pair into one or several nodes in the cluster.
 * @param key The key, needs to be serializable
 * @param val The value, needs to be serializable
 * @param repl_count Number of replicas, i.e. the total number of times a data item should be
 * present in the cluster:
 * <ul>
 * <li>-1: create key/val in all the nodes in the cluster</li>
 * <li>1: create key/val only in one node in the cluster, picked by computing the consistent hash of KEY</li>
 * <li>K &gt; 1: create key/val in those nodes in the cluster which match the consistent hashes created for KEY</li>
 * </ul>
 * A value of 0 is invalid and is rejected with a warning.
 * @param timeout Expiration time for key/value:
 * <ul>
 * <li>-1: don't cache at all in the L1 cache</li>
 * <li>0: cache forever, until removed or evicted because we need space for newer elements</li>
 * <li>&gt; 0: number of milliseconds to keep an idle element in the cache. An element is idle when not accessed.</li>
 * </ul>
 * @param synchronous Whether or not to block until all cluster nodes have applied the change
 */
@ManagedOperation
public void put(K key, V val, short repl_count, long timeout, boolean synchronous) {
if(repl_count == 0) {
if(log.isWarnEnabled())
log.warn("repl_count of 0 is invalid, data will not be stored in the cluster");
return;
}
mcastPut(key, val, repl_count, timeout, synchronous);
// also populate the local L1 front cache, unless L1 caching is off for this entry (timeout -1)
if(l1_cache != null && timeout >= 0)
l1_cache.put(key, val, timeout);
}
/**
 * Places a key/value pair into one or several nodes in the cluster, asynchronously
 * (does not wait for other nodes to apply the change).
 * @param key The key, needs to be serializable
 * @param val The value, needs to be serializable
 * @param repl_count Number of replicas, i.e. the total number of times a data item should be
 * present in the cluster:
 * <ul>
 * <li>-1: create key/val in all the nodes in the cluster</li>
 * <li>1: create key/val only in one node in the cluster, picked by computing the consistent hash of KEY</li>
 * <li>K &gt; 1: create key/val in those nodes in the cluster which match the consistent hashes created for KEY</li>
 * </ul>
 * @param timeout Expiration time for key/value:
 * <ul>
 * <li>-1: don't cache at all in the L1 cache</li>
 * <li>0: cache forever, until removed or evicted because we need space for newer elements</li>
 * <li>&gt; 0: number of milliseconds to keep an idle element in the cache. An element is idle when not accessed.</li>
 * </ul>
 */
@ManagedOperation
public void put(K key, V val, short repl_count, long timeout) {
put(key, val, repl_count, timeout, false); // don't block (asynchronous put) by default
}
/** Places a key/value pair into the cluster using the default replication count and caching time. */
@ManagedOperation
public void put(K key, V val) {
put(key, val, default_replication_count, caching_time);
}
/**
* Returns the value associated with key
* @param key The key, has to be serializable
* @return The value associated with key, or null
*/
@ManagedOperation
public V get(K key) {
// 1. Try the L1 cache first
if(l1_cache != null) {
V val=l1_cache.get(key);
if(val != null) {
if(log.isTraceEnabled())
log.trace("returned value " + val + " for " + key + " from L1 cache");
return val;
}
}
// 2. Try the local cache
Cache.Value> val=l2_cache.getEntry(key);
Value tmp;
if(val != null) {
tmp=val.getValue();
if(tmp !=null) {
V real_value=tmp.getVal();
if(real_value != null && l1_cache != null && val.getTimeout() >= 0)
l1_cache.put(key, real_value, val.getTimeout());
return tmp.getVal();
}
}
// 3. Execute a cluster wide GET
try {
RspList
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy (website footer — extraction artifact; file is truncated above)