com.gemstone.gemfire.internal.cache.wan.serial.SerialGatewaySenderQueue Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of gemfire-core Show documentation
Show all versions of gemfire-core Show documentation
SnappyData store based off Pivotal GemFireXD
The newest version!
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.internal.cache.wan.serial;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.gemstone.gemfire.CancelException;
import com.gemstone.gemfire.SystemFailure;
import com.gemstone.gemfire.cache.AttributesFactory;
import com.gemstone.gemfire.cache.AttributesMutator;
import com.gemstone.gemfire.cache.CacheException;
import com.gemstone.gemfire.cache.CacheListener;
import com.gemstone.gemfire.cache.CacheWriterException;
import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.cache.DiskStore;
import com.gemstone.gemfire.cache.DiskStoreFactory;
import com.gemstone.gemfire.cache.EntryNotFoundException;
import com.gemstone.gemfire.cache.EvictionAction;
import com.gemstone.gemfire.cache.EvictionAttributes;
import com.gemstone.gemfire.cache.Operation;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.RegionAttributes;
import com.gemstone.gemfire.cache.RegionDestroyedException;
import com.gemstone.gemfire.cache.Scope;
import com.gemstone.gemfire.cache.TimeoutException;
import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
import com.gemstone.gemfire.cache.util.GatewayQueueAttributes;
import com.gemstone.gemfire.cache.wan.GatewaySender;
import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.cache.CachedDeserializable;
import com.gemstone.gemfire.internal.cache.Conflatable;
import com.gemstone.gemfire.internal.cache.DistributedRegion;
import com.gemstone.gemfire.internal.cache.EntryEventImpl;
import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
import com.gemstone.gemfire.internal.cache.InternalRegionArguments;
import com.gemstone.gemfire.internal.cache.LocalRegion;
import com.gemstone.gemfire.internal.cache.RegionEventImpl;
import com.gemstone.gemfire.internal.cache.RegionQueue;
import com.gemstone.gemfire.internal.cache.Token;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
import com.gemstone.gemfire.internal.cache.versions.VersionSource;
import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
import com.gemstone.gemfire.internal.cache.wan.GatewaySenderStats;
import com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderImpl;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.offheap.OffHeapRegionEntryHelper;
import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
import com.gemstone.gemfire.internal.size.SingleObjectSizer;
/**
* @author Suranjan Kumar
* @author Yogesh Mahajan
* @since 7.0
*
*/
public class SerialGatewaySenderQueue implements RegionQueue {
/**
 * The key into the backing {@code Region} used when taking entries from the
 * queue. This value is either set when the queue is instantiated or read from
 * the region in the case where this queue takes over where a
 * previous one left off.
 */
private long headKey = -1;
/**
 * The key into the backing {@code Region} used when putting entries onto the
 * queue. This value is either set when the queue is instantiated or read from
 * the region in the case where this queue takes over where a
 * previous one left off. Incremented only after the region put completes,
 * which is the signal to consumers that a new entry is available.
 */
private final AtomicLong tailKey = new AtomicLong();
/**
 * The current key used to do put into the region. Once put is complete, then
 * the {@link #tailKey} is reconciled with this value.
 */
private long currentKey;
// Keys handed out by peek but not yet removed, in peek order.
// NOTE(review): generic type parameters appear to have been stripped from
// this file by extraction; presumably Deque<Long> — confirm against upstream.
private final Deque peekedIds = new LinkedBlockingDeque();
/**
 * The name of the {@code Region} backing this queue.
 */
private final String regionName;
/**
 * The {@code Region} backing this queue.
 */
private Region region;
/**
 * The name of the {@code DiskStore} used to overflow this queue.
 */
private String diskStoreName;
/**
 * The maximum number of entries in a batch.
 */
private final int batchSize;
/**
 * The maximum amount of memory (MB) to allow in the queue before overflowing
 * entries to disk.
 */
private int maximumQueueMemory;
/**
 * Whether conflation is enabled for this queue.
 */
private final boolean enableConflation;
/**
 * Whether persistence is enabled for this queue.
 */
private boolean enablePersistence;
/**
 * Whether write to disk is synchronous.
 */
private boolean isDiskSynchronous;
/**
 * Map from regionName->key to the queue key. This index allows fast
 * updating of entries in the queue for conflation.
 * NOTE(review): declared type garbled by extraction; presumably
 * Map&lt;String, Map&lt;Object, Long&gt;&gt; given how it is built in the
 * constructor — confirm against upstream.
 */
private final Map> indexes;
/**
 * The {@code LogWriterI18n} used by this queue.
 */
private final LogWriterI18n logger;
// Statistics sink updated on queue size changes (see remove()).
private final GatewaySenderStats stats;
/**
 * The maximum allowed key before the keys are rolled over.
 */
private static final long MAXIMUM_KEY = Long.MAX_VALUE;
/**
 * Whether the gateway queue should be no-ack instead of ack; read once from
 * the "gemfire.gateway-queue-no-ack" system property.
 */
private static final boolean NO_ACK = Boolean
.getBoolean("gemfire.gateway-queue-no-ack");
// Highest key handed to the dispatcher; -1 until the first dispatch.
private volatile long lastDispatchedKey = -1;
// Highest key destroyed by the batch-removal thread; -1 until the first removal.
private volatile long lastDestroyedKey = -1;
public static final int DEFAULT_MESSAGE_SYNC_INTERVAL = 1;
private static volatile int messageSyncInterval = DEFAULT_MESSAGE_SYNC_INTERVAL;
// Background thread that destroys already-dispatched entries; started in the
// constructor, stopped via the queue lifecycle.
private BatchRemovalThread removalThread = null;
// When true (the default), puts avoid holding the queue-wide lock for the
// whole region put; see putAndGetKeyNoSync.
private final boolean keyPutNoSync;
// Bound on out-of-order puts buffered in pendingPuts before writers block.
private final int maxPendingPuts;
// Keys whose region put finished ahead of their predecessors; drained in key
// order to advance tailKey. NOTE(review): presumably PriorityQueue<Long>.
private final PriorityQueue pendingPuts;
private SerialGatewaySenderImpl sender = null;
/**
 * Creates the queue for the given sender, initializing (or recovering) the
 * backing region and starting the background batch-removal thread.
 *
 * @param abstractSender the owning sender; supplies configuration, logger
 *        and statistics
 * @param regionName name of the region backing this queue
 * @param listener optional cache listener installed on the backing region
 */
public SerialGatewaySenderQueue(AbstractGatewaySender abstractSender,
String regionName, CacheListener listener) {
// The queue starts out with headKey and tailKey equal to -1 to force
// them to be initialized from the region.
this.logger = abstractSender.getLogger();
this.regionName = regionName;
this.headKey = -1;
this.tailKey.set(-1);
this.currentKey = -1;
this.indexes = new HashMap>();
this.enableConflation = abstractSender.isBatchConflationEnabled();
this.diskStoreName = abstractSender.getDiskStoreName();
this.batchSize = abstractSender.getBatchSize();
this.enablePersistence = abstractSender.isPersistenceEnabled();
if (this.enablePersistence) {
this.isDiskSynchronous = abstractSender.isDiskSynchronous();
} else {
// disk sync is meaningless without persistence
this.isDiskSynchronous = false;
}
// "gemfire.gateway-queue-sync" forces the fully-synchronized put path;
// by default the lock-reduced path (putAndGetKeyNoSync) is used.
if (Boolean.getBoolean("gemfire.gateway-queue-sync")) {
this.keyPutNoSync = false;
this.maxPendingPuts = 0;
this.pendingPuts = null;
}
else {
this.keyPutNoSync = true;
this.maxPendingPuts = Math.max(this.batchSize, 100);
this.pendingPuts = new PriorityQueue(this.maxPendingPuts + 5);
}
// NOTE: "Memeory" is the actual spelling of this GemFire API method.
this.maximumQueueMemory = abstractSender.getMaximumMemeoryPerDispatcherQueue();
this.stats = abstractSender.getStatistics();
// Region must exist before the removal thread starts scanning it.
initializeRegion(abstractSender, listener);
this.removalThread = new BatchRemovalThread((GemFireCacheImpl)abstractSender.getCache());
this.removalThread.start();
this.sender = (SerialGatewaySenderImpl) abstractSender;
if (this.logger.fineEnabled()) {
this.logger.fine(this + ": Contains " + size() + " elements");
}
}
/**
 * Returns the region backing this queue.
 */
public Region getRegion() {
  return region;
}
/**
 * Locally destroys the backing region, discarding the queue's contents on
 * this member.
 */
public void destroy() {
  final Region backing = getRegion();
  backing.localDestroyRegion();
}
/**
 * Enqueues an event. Events originating from the PDX types region are
 * skipped when this queue belongs to a WBCL (async event queue). Depending
 * on configuration the put either takes the queue-wide lock or uses the
 * reduced-locking path.
 *
 * @param event a {@code GatewaySenderEventImpl} to enqueue
 */
public void put(Object event) throws CacheException {
  final GatewaySenderEventImpl senderEvent = (GatewaySenderEventImpl)event;
  final Region eventRegion = senderEvent.getRegion();
  final boolean fromPdxRegion = eventRegion instanceof DistributedRegion
      && ((DistributedRegion)eventRegion).isPdxTypesRegion();
  final boolean isWbclQueue = this.regionName
      .startsWith(AsyncEventQueueImpl.ASYNC_EVENT_QUEUE_PREFIX);
  // PDX type registrations are not propagated through WBCL queues.
  if (fromPdxRegion && isWbclQueue) {
    return;
  }
  if (this.keyPutNoSync) {
    putAndGetKeyNoSync(event);
  }
  else {
    synchronized (this) {
      putAndGetKey(event);
    }
  }
}
/**
 * Puts the object into the region at the current tail key and advances the
 * tail. Caller must hold the queue monitor.
 *
 * @return the key the object was stored under
 */
private long putAndGetKey(Object object) throws CacheException {
  // Grab the tail key for this entry.
  final long keyValue = getTailKey();
  final Long key = Long.valueOf(keyValue);
  this.region.put(key, (AsyncEvent)object);
  // Only advance the tail AFTER the region put completes: the incremented
  // tail is the signal to consumers that a new object is available.
  incrementTailKey();
  if (this.logger.fineEnabled()) {
    this.logger.fine(this + ": Inserted " + key + "->" + object);
  }
  if (object instanceof Conflatable) {
    removeOldEntry((Conflatable)object, key);
  }
  return keyValue;
}
/**
 * Reduced-locking put: the queue monitor is held only long enough to claim a
 * key, the region put itself runs unsynchronized, and afterwards the tail key
 * is advanced in strict key order using {@link #pendingPuts} to park keys
 * whose predecessors have not finished yet.
 *
 * @param object the event to enqueue
 * @return the key the object was stored under
 */
private long putAndGetKeyNoSync(Object object) throws CacheException {
// don't sync on whole put; callers will do the puts in parallel but
// will wait later for previous tailKey put to complete after its own
// put is done
Long key;
synchronized (this) {
initializeKeys();
// Get and increment the current key
// Go for full sync in case of wrapover
long ckey = this.currentKey;
if (this.logger.finerEnabled()) {
this.logger.finer(this + ": Determined current key: " + ckey);
}
key = Long.valueOf(ckey);
this.currentKey = inc(ckey);
}
try {
// Put the object into the region at that key
this.region.put(key, (AsyncEvent)object);
if (this.logger.fineEnabled()) {
this.logger.fine(this + ": Inserted " + key + "->" + object);
}
} finally {
// Reconcile tailKey even if the put threw, so later keys are not
// blocked forever waiting for this one.
final Object sync = this.pendingPuts;
synchronized (sync) {
// Increment the tail key
// It is important that we increment the tail
// key after putting in the region, this is the
// signal that a new object is available.
while (true) {
if (key.longValue() == this.tailKey.get()) {
// this is the next thread, so increment tail and signal all other
// waiting threads if required
incrementTailKey();
// check pendingPuts
// Drain any already-completed successor keys in order; the
// PriorityQueue iterates smallest-first here (see break below).
boolean notifyWaiters = false;
if (this.pendingPuts.size() > 0) {
Iterator itr = this.pendingPuts.iterator();
while (itr.hasNext()) {
Long k = itr.next();
if (k.longValue() == this.tailKey.get()) {
incrementTailKey();
// removed something from pending queue, so notify any waiters
if (!notifyWaiters) {
notifyWaiters =
(this.pendingPuts.size() >= this.maxPendingPuts);
}
itr.remove();
}
else {
// smallest pending key is not the next tail; stop draining
break;
}
}
}
if (notifyWaiters) {
sync.notifyAll();
}
break;
}
else if (this.pendingPuts.size() < this.maxPendingPuts) {
// a predecessor put is still in flight; park this key and return
this.pendingPuts.add(key);
break;
}
else {
// wait for the queue size to go down
boolean interrupted = Thread.interrupted();
Throwable t = null;
try {
sync.wait(5);
} catch (InterruptedException ie) {
t = ie;
interrupted = true;
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
// bail out promptly if the cache/region is shutting down
((LocalRegion)this.region).getCancelCriterion()
.checkCancelInProgress(t);
}
}
}
}
}
if (object instanceof Conflatable) {
removeOldEntry((Conflatable)object, key);
}
return key.longValue();
}
/**
 * Not supported: there are no callers, and supporting it would require every
 * caller to release off-heap resources on the returned
 * GatewaySenderEventImpl.
 *
 * @throws UnsupportedOperationException always
 */
public synchronized AsyncEvent take() throws CacheException {
  throw new UnsupportedOperationException();
}
/**
 * Not supported: there are no callers, and supporting it would require the
 * caller to free off-heap resources on each returned
 * GatewaySenderEventImpl.
 *
 * @throws UnsupportedOperationException always
 */
public List take(int batchSize) throws CacheException {
  throw new UnsupportedOperationException();
}
/**
 * This method removes the last entry. However, it will only let the user
 * remove entries that they have peeked. If the entry was not peeked, this
 * method will silently return.
 */
public synchronized void remove() throws CacheException {
  if (this.peekedIds.isEmpty()) {
    return;
  }
  Long key = this.peekedIds.remove();
  try {
    // Advance the head key past the removed entry and drop its
    // conflation index entry.
    updateHeadKey(key.longValue());
    removeIndex(key);
    // Remove the entry at that key with a callback arg signifying it is
    // a WAN queue so that AbstractRegionEntry.destroy can get the value
    // even if it has been evicted to disk. In the normal case, the
    // AbstractRegionEntry.destroy only gets the value in the VM.
    this.region.localDestroy(key, WAN_QUEUE_TOKEN);
    this.stats.decQueueSize();
  } catch (EntryNotFoundException ok) {
    // this is acceptable because the conflation can remove entries
    // out from underneath us.
    if (this.logger.fineEnabled()) {
      this.logger.fine(this + ": Did not destroy entry at " + key
          + " it was not there. It should have been removed by conflation.");
    }
  }
  // Wake the batch-removal thread if the queue transitioned from
  // "fully destroyed" to "has dispatched-but-undestroyed entries".
  boolean wasEmpty = this.lastDispatchedKey == this.lastDestroyedKey;
  this.lastDispatchedKey = key;
  if (wasEmpty) {
    this.notify();
  }
  if (this.logger.fineEnabled()) {
    // FIX: added the missing space after the key so the message no longer
    // renders as e.g. "Destroyed entry at key 42setting the ...".
    this.logger.fine(this + ": Destroyed entry at key " + key
        + " setting the lastDispatched Key to " + this.lastDispatchedKey
        + ". The last destroyed entry was " + this.lastDestroyedKey);
  }
}
/**
 * This method removes batchSize entries from the queue. It will only remove
 * entries that were previously peeked.
 *
 * @param size
 *          the number of entries to remove
 */
public void remove(int size) throws CacheException {
  int removed = 0;
  while (removed < size) {
    remove();
    removed++;
  }
  if (this.logger.finerEnabled()) {
    this.logger.finer(this + ": Removed a batch of " + size + " entries");
  }
}
// Removes the next previously-peeked entry; the argument is ignored —
// removal is strictly in peek order, not by value.
public void remove(Object object)
{
remove();
}
/**
 * Peeks the next entry without removing it.
 *
 * OFFHEAP: the returned object is only used to see if the queue is empty,
 * so there is no need to worry about the off-heap refCount.
 */
public Object peek() throws CacheException {
  final Object head = peekAhead();
  if (this.logger.finerEnabled()) {
    this.logger.finer(this + ": Peeked " + peekedIds + "->" + head);
  }
  return head;
}
/**
 * Peeks up to {@code size} entries without waiting (delegates with
 * timeToWait of -1).
 */
public List peek(int size) throws CacheException {
  final int noWait = -1;
  return peek(size, noWait);
}
/**
 * Peeks up to {@code size} entries, polling for up to {@code timeToWait}
 * milliseconds (or not at all when it is -1) if the queue runs dry.
 * Off-heap events are copied onto the heap before being returned.
 *
 * OFFHEAP: all returned AsyncEvents end up being removed from the queue
 * after the batch is sent, so no need to worry about off-heap refCount.
 *
 * @param size maximum number of entries to return
 * @param timeToWait total milliseconds to keep polling, or -1 for none
 * @return the peeked batch, possibly smaller than {@code size}
 */
public List peek(int size, int timeToWait)
    throws CacheException {
  long start = System.currentTimeMillis();
  long end = start + timeToWait;
  if (this.logger.finerEnabled()) {
    this.logger.finer(this + ": Peek start time=" + start + " end time="
        + end + " time to wait=" + timeToWait);
  }
  // Over-allocate (size * 2) to leave headroom; conflation/copying can
  // briefly consider more candidates than are returned.
  List batch = new ArrayList(size * 2);
  while (batch.size() < size) {
    AsyncEvent object = peekAhead();
    // instanceof is false for null, so no separate null check is needed.
    if (object instanceof GatewaySenderEventImpl) {
      GatewaySenderEventImpl copy = ((GatewaySenderEventImpl)object).makeHeapCopyIfOffHeap();
      if (copy == null) {
        // event was concurrently freed; skip it and peek again
        continue;
      }
      object = copy;
    }
    if (object != null) {
      batch.add(object);
    } else {
      // If time to wait is -1 (don't wait) or time interval has elapsed
      long currentTime = System.currentTimeMillis();
      if (this.logger.finerEnabled()) {
        this.logger.finer(this + ": Peek current time: " + currentTime);
      }
      if (timeToWait == -1 || (end <= currentTime)) {
        if (this.logger.finerEnabled()) {
          this.logger.finer(this + ": Peek breaking");
        }
        break;
      }
      if (this.logger.finerEnabled()) {
        this.logger.finer(this + ": Peek continuing");
      }
      // Sleep a bit before trying again.
      try {
        Thread.sleep(50);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
      }
      continue;
    }
  }
  if (this.logger.finerEnabled()) {
    this.logger.finer(this + ": Peeked a batch of " + batch.size()
        + " entries");
  }
  return batch;
}
/**
 * Identifies this queue by the name of its backing region.
 */
@Override
public String toString() {
  final String name = this.regionName;
  return "SerialGatewaySender queue :" + name;
}
/**
 * Returns the number of entries in the backing region plus the sender's
 * temporarily-queued events.
 */
public int size() {
  final LocalRegion backing = (LocalRegion)this.region;
  final int regionCount = backing.entryCount();
  return regionCount + this.sender.getTmpQueuedEventSize();
}
/**
 * Installs the given listener on the backing region.
 */
@SuppressWarnings("rawtypes")
public void addCacheListener(CacheListener listener) {
  this.region.getAttributesMutator().addCacheListener(listener);
}
/**
 * Removes the first SerialSecondaryGatewayListener found among the backing
 * region's cache listeners, if any.
 */
@SuppressWarnings("rawtypes")
public void removeCacheListener() {
  final AttributesMutator mutator = this.region.getAttributesMutator();
  for (CacheListener candidate
      : this.region.getAttributes().getCacheListeners()) {
    if (candidate instanceof SerialSecondaryGatewayListener) {
      mutator.removeCacheListener(candidate);
      break;
    }
  }
}
// No need to synchronize because it is called from a synchronized method
private boolean removeOldEntry(Conflatable object, Long tailKey)
throws CacheException {
boolean keepOldEntry = true;
// Determine whether conflation is enabled for this queue and object
// Conflation is enabled iff:
// - this queue has conflation enabled
// - the object can be conflated
if (this.enableConflation && object.shouldBeConflated()) {
if (logger.fineEnabled()) {
logger.fine(this + ": Conflating " + object + " at queue index="
+ tailKey + " queue size=" + size() + " head=" + this.headKey
+ " tail=" + tailKey);
}
// Determine whether this region / key combination is already indexed.
// If so, it is already in the queue. Update the value in the queue and
// set the shouldAddToQueue flag accordingly.
String rName = object.getRegionToConflate();
Object key = object.getKeyToConflate();
Long previousIndex;
synchronized (this) {
Map