com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of gemfire-core Show documentation
SnappyData store based off Pivotal GemFireXD
The newest version!
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.cache.hdfs.internal;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import com.gemstone.gemfire.cache.CacheException;
import com.gemstone.gemfire.cache.EntryNotFoundException;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.SystemTimer;
import com.gemstone.gemfire.internal.SystemTimer.SystemTimerTask;
import com.gemstone.gemfire.internal.cache.ColocationHelper;
import com.gemstone.gemfire.internal.cache.ForceReattemptException;
import com.gemstone.gemfire.internal.cache.LocalRegion;
import com.gemstone.gemfire.internal.cache.PartitionedRegion;
import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
import com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue;
/**
* Parallel Gateway Sender Queue extended for HDFS functionality
*
* @author Hemant Bhanawat
*/
public class HDFSParallelGatewaySenderQueue extends ParallelGatewaySenderQueue {
// Index (within the current cycle) of the next local primary bucket that
// peek(int, int) will draw from; advanced by getCurrentBucketIndex().
private int currentBucketIndex = 0;
// Count of events peeked since the last full pass over the buckets; queried
// and reset at the start of each new pass (see peek(int, int)).
private int elementsPeekedAcrossBuckets = 0;
// Timer that periodically rolls the bucket skip lists. Created only by the
// dispatcher with index 0 when the sender keeps buckets sorted; null otherwise.
private SystemTimer rollListTimer = null;
// System property that overrides the skip-list roll interval (milliseconds).
public static final String ROLL_SORTED_LIST_TIME_INTERVAL_MS__PROP = "gemfire.ROLL_SORTED_LIST_TIME_INTERVAL_MS";
// Roll interval in ms, defaulting to 3000. NOTE(review): effectively constant
// but read once per instance — could be static; confirm before changing.
private final int ROLL_SORTED_LIST_TIME_INTERVAL_MS = Integer.getInteger(ROLL_SORTED_LIST_TIME_INTERVAL_MS__PROP, 3000);
public HDFSParallelGatewaySenderQueue(AbstractGatewaySender sender,
Set userPRs, int idx, int nDispatcher) {
super(sender, userPRs, idx, nDispatcher);
//only first dispatcher Hemant?
if (sender.getBucketSorted() && this.index == 0) {
rollListTimer = new SystemTimer(sender.getCache().getDistributedSystem(),
true, logger);
// schedule the task to roll the skip lists
rollListTimer.scheduleAtFixedRate(new RollSortedListsTimerTask(),
ROLL_SORTED_LIST_TIME_INTERVAL_MS, ROLL_SORTED_LIST_TIME_INTERVAL_MS);
}
}
/**
 * Single-element peek is not supported for the HDFS queue; callers must use
 * {@link #peek(int, int)} instead.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Object peek() throws InterruptedException, CacheException {
  /* Delegating to super.peek() fails at runtime with:
   * java.lang.ClassCastException: HDFSBucketRegionQueue cannot be cast to
   * BucketRegionQueue, raised inside
   * ParallelGatewaySenderQueue.getRandomPrimaryBucket(...) via
   * ParallelGatewaySenderQueue.peek(...). Fail fast with an explicit
   * UnsupportedOperationException instead of surfacing that cast failure.
   */
  throw new UnsupportedOperationException();
}
/**
 * Releases queue resources: performs the parent cleanup first, then cancels
 * the skip-list roll timer if this dispatcher created one.
 */
@Override
public void cleanUp() {
  super.cleanUp();
  cancelRollListTimer();
}
/**
 * Cancels the skip-list roll timer, if present, and clears the reference so
 * a second call is a no-op. Safe to call on dispatchers that never created
 * the timer.
 */
private void cancelRollListTimer() {
  final SystemTimer timer = this.rollListTimer;
  if (timer == null) {
    return;
  }
  timer.cancel();
  this.rollListTimer = null;
}
/**
 * Peeks a batch of events from one local primary bucket per call. Successive
 * calls advance through this dispatcher's local primary buckets in sequence
 * (see {@link #getCurrentBucketIndex(int)}), so the whole bucket set is
 * drained round-robin across calls.
 *
 * A bucket is actually peeked only when it has waited longer than
 * {@code timeToWait} since its last peek, has accumulated more bytes than the
 * batch size, or explicitly requests an immediate drain; otherwise the
 * processor thread may be blocked until work arrives.
 *
 * @param batchSize  desired batch size in megabytes (converted to bytes below)
 * @param timeToWait maximum ms a bucket may go un-peeked before being drained
 * @return the (possibly empty) list of peeked events; peeked events are also
 *         remembered in {@code peekedEvents} for later removal
 */
@Override
public List peek(int batchSize, int timeToWait) throws InterruptedException,
CacheException {
  List batch = new ArrayList();
  // batchSize arrives in MB; comparisons below are against bucket byte sizes.
  int batchSizeInBytes = batchSize*1024*1024;
  PartitionedRegion prQ = getRandomShadowPR();
  if (prQ == null || prQ.getLocalMaxMemory() == 0) {
    // Shadow PR not usable on this member yet; back off briefly and return
    // an empty batch rather than spinning.
    try {
      Thread.sleep(50);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    blockProcesorThreadIfRequired();
    return batch;
  }
  ArrayList list = null;
  ArrayList pbuckets = new ArrayList(prQ
      .getDataStore().getAllLocalPrimaryBucketIds());
  ArrayList buckets = new ArrayList();
  // This dispatcher handles only the primary buckets whose id maps to its
  // index (bucketId % nDispatcher == index).
  for(Integer i : pbuckets) {
    if(i % this.nDispatcher == this.index)
      buckets.add(i);
  }
  // In case of failures, peekedEvents would possibly have some elements
  // add them.
  if (this.resetLastPeeked) {
    int previousBucketId = -1;
    boolean stillPrimary = true;
    Iterator iter = peekedEvents.iterator();
    // we need to remove the events of the bucket that are no more primary on
    // this node as they cannot be persisted from this node.
    while(iter.hasNext()) {
      HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)iter.next();
      // Events are grouped by bucket, so the primary check is done once per
      // run of events with the same bucket id.
      if (previousBucketId != hdfsEvent.getBucketId()){
        stillPrimary = buckets.contains(hdfsEvent.getBucketId());
        previousBucketId = hdfsEvent.getBucketId();
      }
      if (stillPrimary)
        batch.add(hdfsEvent);
      else {
        iter.remove();
      }
    }
    this.resetLastPeeked = false;
  }
  if (buckets.size() == 0) {
    // No local primary buckets for this dispatcher right now.
    // Sleep a bit before trying again. provided by Dan
    try {
      Thread.sleep(50);
    }
    catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    return batch;
  }
  // NOTE(review): removed a dead "if (this.sender.getBucketSorted()) { }"
  // statement here — its body was empty, so it had no effect.
  //
  // Each call to this function returns index of next bucket
  // that is to be processed. This function takes care
  // of the bucket sequence that is peeked by a sequence of
  // peek calls.
  // If there are bucket movements between two consecutive
  // calls to this function then there is chance that a bucket
  // is processed twice while another one is skipped. But, that is
  // ok because in the next round, it will be processed.
  int bIdIndex = getCurrentBucketIndex(buckets.size());
  // If we have gone through all the buckets once and no
  // elements were peeked from any of the buckets, take a nap.
  // This always sleep in the first call but that should be ok
  // because the timeToWait in practical use cases would be greater
  // than this sleep of 100 ms.
  if (bIdIndex == 0 && getAndresetElementsPeekedAcrossBuckets() == 0) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  HDFSBucketRegionQueue hrq = ((HDFSBucketRegionQueue)prQ
      .getDataStore().getLocalBucketById(buckets.get(bIdIndex)));
  if (hrq == null) {
    // bucket moved to another node after getAllLocalPrimaryBucketIds
    // was called. Peeking not possible. return.
    return batch;
  }
  long entriesWaitingTobePeeked = hrq.totalEntries();
  if (entriesWaitingTobePeeked == 0) {
    // Nothing queued in this bucket; let the processor thread block.
    blockProcesorThreadIfRequired();
    return batch;
  }
  long currentTimeInMillis = System.currentTimeMillis();
  long bucketSizeInBytes = hrq.getQueueSizeInBytes();
  // Drain the bucket when it has waited long enough, grown past the batch
  // size, or asked for an immediate drain.
  if (((currentTimeInMillis - hrq.getLastPeekTimeInMillis()) > timeToWait)
      || ( bucketSizeInBytes > batchSizeInBytes)
      || hrq.shouldDrainImmediately()) {
    // peek now
    if (logger.finerEnabled()) {
      logger.finer("Peeking queue " + hrq.getId() + ": bucketSizeInBytes " + bucketSizeInBytes
          + ": batchSizeInBytes" + batchSizeInBytes
          + ": timeToWait" + timeToWait
          + ": (currentTimeInMillis - hrq.getLastPeekTimeInMillis())" + (currentTimeInMillis - hrq.getLastPeekTimeInMillis()));
    }
    list = peekAhead(buckets.get(bIdIndex), hrq);
    if (list != null && list.size() != 0 ) {
      // Remember peeked events so remove(int) can destroy them later and so
      // they can be replayed after a failure (resetLastPeeked path above).
      for (Object object : list) {
        batch.add(object);
        peekedEvents.add((HDFSGatewayEventImpl)object);
      }
    }
  }
  else {
    blockProcesorThreadIfRequired();
  }
  if (logger.fineEnabled() && batch.size() > 0) {
    logger.fine(this + ": Peeked a batch of " + batch.size() + " entries");
  }
  setElementsPeekedAcrossBuckets(batch.size());
  return batch;
}
/**
 * Advances the bucket cursor and returns the index of the bucket to process
 * on this call. The cursor wraps back to 0 once it runs past the end of the
 * current bucket list, so successive calls cycle through all buckets.
 *
 * @param totalBuckets number of local primary buckets in the current list
 * @return current bucket index
 */
private int getCurrentBucketIndex(int totalBuckets) {
  // Reset the cursor when it has moved beyond the (possibly shrunk) list.
  if (currentBucketIndex >= totalBuckets) {
    currentBucketIndex = 0;
  }
  // Post-increment: hand back the current slot, then advance for next call.
  return currentBucketIndex++;
}
@Override
public void remove(int batchSize) throws CacheException {
int destroyed = 0;
HDFSGatewayEventImpl event = null;
if (this.peekedEvents.size() > 0)
event = (HDFSGatewayEventImpl)this.peekedEvents.remove();
while (event != null && destroyed < batchSize) {
LocalRegion currentRegion = event.getRegion();
int currentBucketId = event.getBucketId();
int bucketId = event.getBucketId();
ArrayList listToDestroy = new ArrayList();
ArrayList