org.mapdb.Store (artifact: mapdb-nounsafe)
MapDB provides concurrent Maps, Sets and Queues backed by disk storage or off-heap memory. It is a fast, scalable and easy to use embedded Java database.
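A minimal usage sketch of the library this class belongs to (assuming the MapDB 2.x DBMaker API; the map name "example" is illustrative):
import org.mapdb.*;
public class MapDBExample {
public static void main(String[] args) {
DB db = DBMaker.memoryDB().make();                    // in-memory/off-heap store
java.util.Map<Object,Object> map = db.hashMap("example"); // map backed by the store
map.put("key", "value");
db.commit();                                          // flush changes through the Store
db.close();
}
}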
package org.mapdb;
import java.io.*;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.CRC32;
/**
 * Abstract base class for MapDB storage engines. Provides segment locking,
 * optional instance caching, and record (de)serialization with optional
 * LZF compression, XTEA encryption and CRC32 checksums.
 */
public abstract class Store implements Engine {
protected static final Logger LOG = Logger.getLogger(Store.class.getName());
protected static final long FEAT_COMP_LZF = 64L-1L;
protected static final long FEAT_ENC_XTEA = 64L-2L;
protected static final long FEAT_CRC = 64L-3L;
protected static final long HEAD_CHECKSUM = 4;
protected static final long HEAD_FEATURES = 8;
//TODO if locks are disabled, use NoLock for structuralLock and commitLock
/** protects structural layout of records. Memory allocator is single threaded under this lock */
protected final ReentrantLock structuralLock = new ReentrantLock(CC.FAIR_LOCKS);
/** protects lifecycle methods such as commit, rollback and close() */
protected final ReentrantLock commitLock =
!CC.ASSERT?
new ReentrantLock(CC.FAIR_LOCKS):
new ReentrantLock(CC.FAIR_LOCKS) {
@Override
public void lock() {
check();
super.lock();
}
@Override
public void unlock() {
super.unlock();
check();
}
private void check() {
if(structuralLock.isHeldByCurrentThread())
throw new AssertionError("Can not lock commitLock, structuralLock already locked");
for (ReadWriteLock l : locks) {
if (!(l instanceof ReentrantReadWriteLock))
return; //different locking strategy, can not tell if locked by current thread
if (((ReentrantReadWriteLock) l).isWriteLockedByCurrentThread())
throw new AssertionError("Current thread holds WriteLock, can not lock CommitLock");
}
}
};
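// A sketch of the lock-ordering rule the assertion above enforces: commitLock (and
// segment write locks) must be acquired before structuralLock, never after it.
//
//   commitLock.lock();
//   try {
//       structuralLock.lock();
//       try {
//           // single-threaded memory-allocator work happens here
//       } finally {
//           structuralLock.unlock();
//       }
//   } finally {
//       commitLock.unlock();
//   }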
/** protects data from being overwritten while read */
protected final ReadWriteLock[] locks;
protected final int lockScale;
protected final int lockMask;
protected volatile boolean closed = false;
protected final boolean readonly;
protected final String fileName;
protected final Volume.VolumeFactory volumeFactory;
protected final boolean checksum;
protected final boolean compress;
protected final boolean encrypt;
protected final EncryptionXTEA encryptionXTEA;
protected final ThreadLocal<CompressLZF> LZF;
protected final boolean snapshotEnable;
protected final boolean fileLockDisable;
protected final AtomicLong metricsDataWrite;
protected final AtomicLong metricsRecordWrite;
protected final AtomicLong metricsDataRead;
protected final AtomicLong metricsRecordRead;
protected final boolean deserializeExtra;
protected DataIO.HeartbeatFileLock fileLockHeartbeat;
protected final Cache[] caches;
public static final int LOCKING_STRATEGY_READWRITELOCK=0;
public static final int LOCKING_STRATEGY_WRITELOCK=1;
public static final int LOCKING_STRATEGY_NOLOCK=2;
protected Store(
String fileName,
Volume.VolumeFactory volumeFactory,
Cache cache,
int lockScale,
int lockingStrategy,
boolean checksum,
boolean compress,
byte[] password,
boolean readonly,
boolean snapshotEnable,
boolean fileLockDisable,
DataIO.HeartbeatFileLock fileLockHeartbeat) {
this.fileName = fileName;
this.volumeFactory = volumeFactory;
this.lockScale = lockScale;
this.snapshotEnable = snapshotEnable;
this.lockMask = lockScale-1;
this.fileLockDisable = fileLockDisable;
this.fileLockHeartbeat = fileLockHeartbeat;
if(fileLockHeartbeat!=null) {
fileLockHeartbeat.setQuitAfterGCed(Store.this);
}
if(Integer.bitCount(lockScale)!=1)
throw new IllegalArgumentException("Lock Scale must be power of two");
//PERF replace with incrementer on java 8
metricsDataWrite = new AtomicLong();
metricsRecordWrite = new AtomicLong();
metricsDataRead = new AtomicLong();
metricsRecordRead = new AtomicLong();
locks = new ReadWriteLock[lockScale];
for(int i=0;i< locks.length;i++){
if(lockingStrategy==LOCKING_STRATEGY_READWRITELOCK)
locks[i] = new ReentrantReadWriteLock(CC.FAIR_LOCKS);
else if(lockingStrategy==LOCKING_STRATEGY_WRITELOCK){
locks[i] = new ReadWriteSingleLock(new ReentrantLock(CC.FAIR_LOCKS));
}else if(lockingStrategy==LOCKING_STRATEGY_NOLOCK){
locks[i] = new ReadWriteSingleLock(NOLOCK);
}else{
throw new IllegalArgumentException("Illegal locking strategy: "+lockingStrategy);
}
}
if(cache==null) {
caches = null;
}else {
caches = new Cache[lockScale];
caches[0] = cache;
for (int i = 1; i < caches.length; i++) {
//each segment needs different cache, since StoreCache is not thread safe
caches[i] = cache.newCacheForOtherSegment();
}
}
this.checksum = checksum;
this.compress = compress;
this.encrypt = password!=null;
this.deserializeExtra = (this.checksum || this.encrypt || this.compress);
this.readonly = readonly;
this.encryptionXTEA = !encrypt?null:new EncryptionXTEA(password);
this.LZF = !compress?null:new ThreadLocal<CompressLZF>() {
@Override
protected CompressLZF initialValue() {
return new CompressLZF();
}
};
if(CC.LOG_STORE && LOG.isLoggable(Level.FINE)){
LOG.log(Level.FINE, "Store constructed: fileName={0}, volumeFactory={1}, cache={2}, lockScale={3}, " +
"lockingStrategy={4}, checksum={5}, compress={6}, password={7}, readonly={8}, " +
"snapshotEnable={9}, fileLockDisable={10}, fileLockHeartbeat={11}",
new Object[]{fileName, volumeFactory, cache, lockScale, lockingStrategy, checksum,
compress, (password!=null), readonly, snapshotEnable, fileLockDisable, fileLockHeartbeat});
}
}
public void init(){}
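// The feature bitmap validated below keeps its flags in the top bits of a 64-bit
// header word: bit 63 = LZF compression, bit 62 = XTEA encryption, bit 61 = CRC32
// checksum (the FEAT_* constants above are the shift distances). An illustrative
// sketch of how such a word could be assembled when a store is created (not the
// original writer code):
//
//   long feat = 0;
//   if (compress) feat |= 1L << FEAT_COMP_LZF;
//   if (encrypt)  feat |= 1L << FEAT_ENC_XTEA;
//   if (checksum) feat |= 1L << FEAT_CRC;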
protected void checkFeaturesBitmap(final long feat){
if(CC.LOG_STORE && LOG.isLoggable(Level.FINE)) {
LOG.log(Level.FINE, "Feature Bitmap: {0}", Long.toBinaryString(feat));
}
boolean xteaEnc = (feat>>>FEAT_ENC_XTEA&1)!=0;
if(xteaEnc&& !encrypt){
throw new DBException.WrongConfig("Store was created with encryption, but no password is set in config.");
}
if(!xteaEnc&& encrypt){
throw new DBException.WrongConfig("Password is set, but store is not encrypted.");
}
boolean lzwComp = (feat>>> FEAT_COMP_LZF &1)!=0;
if(lzwComp&& !compress){
throw new DBException.WrongConfig("Store was created with compression, but no compression is enabled in config.");
}
if(!lzwComp&& compress){
throw new DBException.WrongConfig("Compression is set in config, but store was created with compression.");
}
boolean crc = (feat>>>FEAT_CRC&1)!=0;
if(crc&& !checksum){
throw new DBException.WrongConfig("Store was created with CRC32 checksum, but it is not enabled in config.");
}
if(!crc&& checksum){
throw new DBException.WrongConfig("Checksum us enabled, but store was created without it.");
}
int endZeroes = Long.numberOfTrailingZeros(feat);
if(endZeroes<FEAT_CRC)
throw new DBException.WrongConfig("Unknown feature in store header, this store was probably created by a newer version of MapDB.");
}
@Override
public <A> A get(long recid, Serializer<A> serializer) {
if(serializer==null)
throw new NullPointerException();
if(closed)
throw new IllegalAccessError("closed");
int lockPos = lockPos(recid);
final Lock lock = locks[lockPos].readLock();
final Cache cache = caches==null ? null : caches[lockPos];
lock.lock();
try{
A o = cache==null ? null : (A) cache.get(recid);
if(o!=null) {
if(o == Cache.NULL)
o = null;
if (CC.LOG_STORE && LOG.isLoggable(Level.FINEST)) {
LOG.log(Level.FINEST, "Get from cache: recid={0}, serializer={1}, rec={2}", new Object[]{recid, serializer, o});
}
return o;
}
o = get2(recid,serializer);
if(cache!=null) {
cache.put(recid, o);
}
return o;
}finally {
lock.unlock();
}
}
protected abstract <A> A get2(long recid, Serializer<A> serializer);
@Override
public <A> void update(long recid, A value, Serializer<A> serializer) {
if(serializer==null)
throw new NullPointerException();
if(closed)
throw new IllegalAccessError("closed");
//serialize outside lock
DataIO.DataOutputByteArray out = serialize(value, serializer);
if(CC.LOG_STORE_RECORD && LOG.isLoggable(Level.FINER))
LOG.log(Level.FINER, "REC PUT recid={0}, val={1}, serializer={2}",new Object[]{recid, value, serializer});
int lockPos = lockPos(recid);
final Lock lock = locks[lockPos].writeLock();
final Cache cache = caches==null ? null : caches[lockPos];
lock.lock();
try{
if(cache!=null) {
cache.put(recid, value);
}
update2(recid,out);
}finally {
lock.unlock();
}
}
//TODO DataOutputByteArray is not thread safe, make one recycled per segment lock
protected final AtomicReference<DataIO.DataOutputByteArray> recycledDataOut =
new AtomicReference<DataIO.DataOutputByteArray>();
protected <A> DataIO.DataOutputByteArray serialize(A value, Serializer<A> serializer){
if(value==null)
return null;
try {
DataIO.DataOutputByteArray out = newDataOut2();
serializer.serialize(out,value);
if(out.pos>0){
if(compress){
DataIO.DataOutputByteArray tmp = newDataOut2();
tmp.ensureAvail(out.pos+40);
final CompressLZF lzf = LZF.get();
int newLen;
try{
newLen = lzf.compress(out.buf,out.pos,tmp.buf,0);
}catch(IndexOutOfBoundsException e){
newLen=0; //larger after compression
}
if(newLen>=out.pos) newLen= 0; //larger after compression
if(newLen==0){
recycledDataOut.lazySet(tmp);
//compression had no effect, so just write zero at beginning and move array by 1
out.ensureAvail(out.pos+1);
System.arraycopy(out.buf,0,out.buf,1,out.pos);
out.pos+=1;
out.buf[0] = 0;
}else{
//compression had effect, so write decompressed size and compressed array
final int decompSize = out.pos;
out.pos=0;
DataIO.packInt(out,decompSize);
out.write(tmp.buf,0,newLen);
recycledDataOut.lazySet(tmp);
}
}
if(encrypt){
int size = out.pos;
//round size to 16
if(size%EncryptionXTEA.ALIGN!=0)
size += EncryptionXTEA.ALIGN - size%EncryptionXTEA.ALIGN;
final int sizeDif=size-out.pos;
//encrypt
out.ensureAvail(sizeDif+1);
encryptionXTEA.encrypt(out.buf,0,size);
//and write diff from 16
out.pos = size;
out.writeByte(sizeDif);
}
if(checksum){
CRC32 crc = new CRC32();
crc.update(out.buf,0,out.pos);
out.writeInt((int)crc.getValue());
}
if(CC.PARANOID)try{
//check that array is the same after deserialization
DataInput inp = new DataIO.DataInputByteArray(Arrays.copyOf(out.buf, out.pos));
byte[] decompress = deserialize(Serializer.BYTE_ARRAY_NOSIZE,out.pos,inp);
DataIO.DataOutputByteArray expected = newDataOut2();
serializer.serialize(expected,value);
byte[] expected2 = Arrays.copyOf(expected.buf, expected.pos);
//check arrays equals
if(CC.ASSERT && ! (Arrays.equals(expected2,decompress)))
throw new AssertionError();
}catch(Exception e){
throw new RuntimeException(e);
}
}
metricsDataWrite.getAndAdd(out.pos);
metricsRecordWrite.incrementAndGet();
return out;
} catch (IOException e) {
throw new IOError(e);
}
}
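// On-disk record layout produced by serialize() above, undone in reverse order by
// deserializeExtra() (derived from the code; shown for orientation):
//
//   1. compression: packed decompressed size + LZF payload,
//      or a single 0x00 byte + raw payload when compression did not help
//   2. encryption:  payload padded to a 16-byte XTEA boundary, then 1 byte pad length
//   3. checksum:    payload followed by a 4-byte CRC32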
protected DataIO.DataOutputByteArray newDataOut2() {
DataIO.DataOutputByteArray tmp = recycledDataOut.getAndSet(null);
if(tmp==null) tmp = new DataIO.DataOutputByteArray();
else tmp.pos=0;
return tmp;
}
protected <A> A deserialize(Serializer<A> serializer, int size, DataInput input){
try {
//PERF return future and finish deserialization outside lock; does it even bring any performance bonus?
DataIO.DataInputInternal di = (DataIO.DataInputInternal) input;
if (size > 0 && deserializeExtra) {
return deserializeExtra(serializer,size,di);
}
if(!serializer.isTrusted() && !alreadyCopyedDataInput(input,size)){
//if serializer is not trusted, introduce hard boundary check, so it does not read other records data
DataIO.DataInputByteArray b = new DataIO.DataInputByteArray(new byte[size]);
input.readFully(b.buf);
input = b;
di = b;
}
int start = di.getPos();
A ret = serializer.deserialize(di, size);
if (size + start > di.getPos())
throw new DBException.DataCorruption("Data were not fully read, check your serializer. Read size:"
+(di.getPos()-start)+", expected size:"+size);
if (size + start < di.getPos())
throw new DBException.DataCorruption("Data were read beyond record size, check your serializer. Read size:"
+(di.getPos()-start)+", expected size:"+size);
metricsDataRead.getAndAdd(size);
metricsRecordRead.getAndIncrement();
return ret;
}catch(IOException e){
throw new IOError(e);
}
}
/* Some Volumes (RAF) already copy their DataInput into byte[]. */
private final boolean alreadyCopyedDataInput(DataInput input, int size){
if(!(input instanceof DataIO.DataInputByteArray))
return false;
DataIO.DataInputByteArray input2 = (DataIO.DataInputByteArray) input;
return input2.pos==0 && input2.buf.length==size;
}
/** helper method, it is called if compression or other stuff is used. It can not be JITed that well. */
private <A> A deserializeExtra(Serializer<A> serializer, int size, DataIO.DataInputInternal di) throws IOException {
if (checksum) {
//last four bytes are the CRC32 checksum
size -= 4;
//read data into tmp buffer
DataIO.DataOutputByteArray tmp = newDataOut2();
tmp.ensureAvail(size);
int oldPos = di.getPos();
di.readFully(tmp.buf, 0, size);
final int checkExpected = di.readInt();
di.setPos(oldPos);
//calculate checksums
CRC32 crc = new CRC32();
crc.update(tmp.buf, 0, size);
recycledDataOut.lazySet(tmp);
int check = (int) crc.getValue();
if (check != checkExpected)
throw new IOException("Checksum does not match, data broken");
}
if (encrypt) {
DataIO.DataOutputByteArray tmp = newDataOut2();
size -= 1;
tmp.ensureAvail(size);
di.readFully(tmp.buf, 0, size);
encryptionXTEA.decrypt(tmp.buf, 0, size);
int cut = di.readUnsignedByte(); //length dif from 16bytes
di = new DataIO.DataInputByteArray(tmp.buf);
size -= cut;
}
if (compress) {
//final int origPos = di.pos;
int decompSize = DataIO.unpackInt(di);
if (decompSize == 0) {
size -= 1;
//rest of `di` is uncompressed data
} else {
DataIO.DataOutputByteArray out = newDataOut2();
out.ensureAvail(decompSize);
CompressLZF lzf = LZF.get();
//PERF copy to heap if Volume is not mapped
//argument is not needed; unpackedSize= size-(di.pos-origPos),
byte[] b = di.internalByteArray();
if (b != null) {
lzf.expand(b, di.getPos(), out.buf, 0, decompSize);
} else {
ByteBuffer bb = di.internalByteBuffer();
if (bb != null) {
lzf.expand(bb, di.getPos(), out.buf, 0, decompSize);
} else {
lzf.expand(di, out.buf, 0, decompSize);
}
}
di = new DataIO.DataInputByteArray(out.buf);
size = decompSize;
}
}
int start = di.getPos();
A ret = serializer.deserialize(di, size);
if (size + start > di.getPos())
throw new DBException.DataCorruption("Data were not fully read, check your serializer. Read size:"
+(di.getPos()-start)+", expected size:"+size);
if (size + start < di.getPos())
throw new DBException.DataCorruption("Data were read beyond record size, check your serializer. Read size:"
+(di.getPos()-start)+", expected size:"+size);
return ret;
}
protected abstract void update2(long recid, DataIO.DataOutputByteArray out);
@Override
public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
if(serializer==null)
throw new NullPointerException();
if(closed)
throw new IllegalAccessError("closed");
if (CC.LOG_STORE && LOG.isLoggable(Level.FINEST)) {
LOG.log(Level.FINEST, "CAS: recid={0}, serializer={1}, expectedRec={2}, newRec={3}", new Object[]{recid, serializer, expectedOldValue, newValue});
}
//PERF binary CAS & serialize outside lock
final int lockPos = lockPos(recid);
final Lock lock = locks[lockPos].writeLock();
final Cache cache = caches==null ? null : caches[lockPos];
lock.lock();
try{
A oldVal = cache==null ? null : (A)cache.get(recid);
if(oldVal == null) {
oldVal = get2(recid, serializer);
}else if(oldVal == Cache.NULL){
oldVal = null;
}
if(oldVal==expectedOldValue || (oldVal!=null && serializer.equals(oldVal,expectedOldValue))){
update2(recid,serialize(newValue,serializer));
if(cache!=null) {
cache.put(recid, newValue);
}
return true;
}
return false;
}finally {
lock.unlock();
}
}
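// Illustrative use of compareAndSwap through the Engine interface (recid and values
// are hypothetical; Serializer.STRING is a stock MapDB serializer):
//
//   long recid = engine.put("old", Serializer.STRING);
//   boolean swapped = engine.compareAndSwap(recid, "old", "new", Serializer.STRING);
//   // swapped == true, and the record now holds "new"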
@Override
public <A> void delete(long recid, Serializer<A> serializer) {
if(serializer==null)
throw new NullPointerException();
if(closed)
throw new IllegalAccessError("closed");
if(CC.LOG_STORE_RECORD && LOG.isLoggable(Level.FINER))
LOG.log(Level.FINER, "REC DEL recid={0}, serializer={1}",new Object[]{recid, serializer});
final int lockPos = lockPos(recid);
final Lock lock = locks[lockPos].writeLock();
final Cache cache = caches==null ? null : caches[lockPos];
lock.lock();
try{
if(cache!=null) {
cache.put(recid, null);
}
delete2(recid, serializer);
}finally {
lock.unlock();
}
}
protected abstract <A> void delete2(long recid, Serializer<A> serializer);
protected final int lockPos(final long recid) {
int h = (int)(recid ^ (recid >>> 32));
//spread bits, so each bit becomes part of segment (lockPos)
h ^= (h<<4);
h ^= (h<<4);
h ^= (h<<4);
h ^= (h<<4);
h ^= (h<<4);
h ^= (h<<4);
h ^= (h<<4);
return h & lockMask;
}
protected void assertReadLocked(int segment) {
if(!(locks[segment] instanceof ReentrantReadWriteLock))
return;
ReentrantReadWriteLock lock = (ReentrantReadWriteLock) locks[segment];
if(lock.isWriteLockedByCurrentThread())
return;
if(lock.isWriteLocked()){
throw new AssertionError();
}
if(lock.getReadHoldCount()<=0){
throw new AssertionError();
}
}
protected void assertWriteLocked(int segment) {
ReadWriteLock l = locks[segment];
if(l instanceof ReentrantReadWriteLock && !((ReentrantReadWriteLock) l).isWriteLockedByCurrentThread()){
throw new AssertionError();
}
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public boolean isReadOnly() {
return readonly;
}
/** traverses Engine wrappers and returns underlying {@link Store}*/
public static Store forDB(DB db){
return forEngine(db.engine);
}
/** traverses Engine wrappers and returns underlying {@link Store}*/
public static Store forEngine(Engine e){
Engine engine2 = e.getWrappedEngine();
if(engine2!=null)
return forEngine(engine2);
return (Store) e;
}
public abstract long getCurrSize();
public abstract long getFreeSize();
/**
 * If the underlying storage is a memory-mapped file, this method will try to
 * load and precache all file data into the disk cache,
 * most likely by calling {@link MappedByteBuffer#load()},
 * but it could also read the content of the entire file, etc.
 * This method does not pin data into memory; they might be evicted at any time.
 *
 * @return true if this method did something, false if the underlying storage
 * does not support loading or is already in-memory
 */
public abstract boolean fileLoad();
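// Typical use (sketch): warm the OS page cache right after opening a file-backed DB.
//
//   Store store = Store.forDB(db);   // unwrap the Store from a DB instance
//   boolean loaded = store.fileLoad();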
@Override
public void clearCache() {
if(closed)
throw new IllegalAccessError("closed");
if (CC.LOG_STORE && LOG.isLoggable(Level.FINE)) {
LOG.log(Level.FINE, "Clear Cache");
}
if(caches==null)
return;
for(int i=0;i<caches.length;i++){
caches[i].clear();
}
}
public void metricsCollect(Map<String,Long> map) {
map.put(DB.METRICS_DATA_WRITE,metricsDataWrite.getAndSet(0));
map.put(DB.METRICS_RECORD_WRITE,metricsRecordWrite.getAndSet(0));
map.put(DB.METRICS_DATA_READ,metricsDataRead.getAndSet(0));
map.put(DB.METRICS_RECORD_READ,metricsRecordRead.getAndSet(0));
long cacheHit = 0;
long cacheMiss = 0;
if(caches!=null) {
for (Cache c : caches) {
cacheHit += c.metricsCacheHit();
cacheMiss += c.metricsCacheMiss();
}
}
map.put(DB.METRICS_CACHE_HIT,cacheHit);
map.put(DB.METRICS_CACHE_MISS, cacheMiss);
}
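// Sketch: collect the counters into a map; keys are the DB.METRICS_* constants used
// above, and each call resets the counters to zero.
//
//   Map<String,Long> m = new LinkedHashMap<String,Long>();
//   store.metricsCollect(m);
//   long bytesWritten = m.get(DB.METRICS_DATA_WRITE);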
public abstract void backup(OutputStream out, boolean incremental);
public abstract void backupRestore(InputStream[] in);
/**
* Cache implementation, part of {@link Store} class.
*/
public static abstract class Cache {
protected final Lock lock;
protected long cacheHitCounter = 0;
protected long cacheMissCounter = 0;
protected static final Object NULL = new Object();
public Cache(boolean disableLocks) {
this.lock = disableLocks?null: new ReentrantLock(CC.FAIR_LOCKS);
}
public abstract Object get(long recid);
public abstract void put(long recid, Object item);
public abstract void clear();
public abstract void close();
public abstract Cache newCacheForOtherSegment();
/** how many times the cache was hit; also resets the counter */
public long metricsCacheHit() {
Lock lock = this.lock;
if(lock!=null)
lock.lock();
try {
long ret = cacheHitCounter;
cacheHitCounter=0;
return ret;
}finally {
if(lock!=null)
lock.unlock();
}
}
/** how many times the cache was missed; also resets the counter */
public long metricsCacheMiss() {
Lock lock = this.lock;
if(lock!=null)
lock.lock();
try {
long ret = cacheMissCounter;
cacheMissCounter=0;
return ret;
}finally {
if(lock!=null)
lock.unlock();
}
}
/**
 * Fixed size cache which uses a hash table.
 * It is thread-safe and requires only minimal locking.
 * Items are randomly evicted and replaced on hash collisions.
 *
 * This is a simple, concurrent, small-overhead, random cache.
 *
 * @author Jan Kotek
 */
public static final class HashTable extends Cache {
protected final long[] recids; //TODO 6 byte longs
protected final Object[] items;
protected final int cacheMaxSizeMask;
public HashTable(int cacheMaxSize, boolean disableLocks) {
super(disableLocks);
cacheMaxSize = DataIO.nextPowTwo(cacheMaxSize); //next pow of two
this.cacheMaxSizeMask = cacheMaxSize-1;
this.recids = new long[cacheMaxSize];
this.items = new Object[cacheMaxSize];
}
@Override
public Object get(long recid) {
int pos = pos(recid);
Lock lock = this.lock;
if(lock!=null)
lock.lock();
try {
boolean hit = recids[pos] == recid;
if(hit){
if(CC.METRICS_CACHE)
cacheHitCounter++;
return items[pos];
}else{
if(CC.METRICS_CACHE)
cacheMissCounter++;
return null;
}
}finally {
if(lock!=null)
lock.unlock();
}
}
@Override
public void put(long recid, Object item) {
if(item == null)
item = NULL;
int pos = pos(recid);
Lock lock = this.lock;
if(lock!=null)
lock.lock();
try {
recids[pos] = recid;
items[pos] = item;
}finally {
if(lock!=null)
lock.unlock();
}
}
protected int pos(long recid) {
return DataIO.longHash(recid)&cacheMaxSizeMask;
}
@Override
public void clear() {
Lock lock = this.lock;
if(lock!=null)
lock.lock();
try {
Arrays.fill(recids, 0L);
Arrays.fill(items, null);
}finally {
if(lock!=null)
lock.unlock();
}
}
@Override
public void close() {
clear();
}
@Override
public Cache newCacheForOtherSegment() {
return new HashTable(recids.length,lock==null);
}
}
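// Illustrative use of the HashTable cache above (Cache is an internal API; the
// capacity is rounded up to a power of two):
//
//   Store.Cache c = new Store.HashTable(1024, false);
//   c.put(1L, "a");
//   Object hit = c.get(1L);   // "a"
//   // another recid hashing to the same slot silently evicts this entry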
/**
 * Instance cache which uses {@code SoftReference} or {@code WeakReference}.
 * Items can be removed from the cache by the Garbage Collector
 * once they are no longer strongly reachable.
 *
 * @author Jan Kotek
 */
public static class WeakSoftRef extends Store.Cache {
protected interface CacheItem{
long getRecid();
Object get();
void clear();
}
protected static final class CacheWeakItem<A> extends WeakReference<A> implements CacheItem {
final long recid;
public CacheWeakItem(A referent, ReferenceQueue<A> q, long recid) {
super(referent, q);
this.recid = recid;
}
@Override
public long getRecid() {
return recid;
}
}
protected static final class CacheSoftItem<A> extends SoftReference<A> implements CacheItem {
final long recid;
public CacheSoftItem(A referent, ReferenceQueue<A> q, long recid) {
super(referent, q);
this.recid = recid;
}
@Override
public long getRecid() {
return recid;
}
}
protected ReferenceQueue<Object> queue = new ReferenceQueue<Object>();