com.gemstone.gemfire.internal.cache.CreateRegionProcessor (gemfire-core: SnappyData store based off Pivotal GemFireXD)
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.internal.cache;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.gemstone.gemfire.CancelException;
import com.gemstone.gemfire.DataSerializer;
import com.gemstone.gemfire.SystemFailure;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.PartitionAttributes;
import com.gemstone.gemfire.cache.RegionDestroyedException;
import com.gemstone.gemfire.cache.Scope;
import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.distributed.internal.DistributionAdvisee;
import com.gemstone.gemfire.distributed.internal.DistributionAdvisor;
import com.gemstone.gemfire.distributed.internal.DistributionManager;
import com.gemstone.gemfire.distributed.internal.DistributionMessage;
import com.gemstone.gemfire.distributed.internal.HighPriorityDistributionMessage;
import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
import com.gemstone.gemfire.distributed.internal.MessageWithReply;
import com.gemstone.gemfire.distributed.internal.ReplyException;
import com.gemstone.gemfire.distributed.internal.ReplyMessage;
import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.Assert;
import com.gemstone.gemfire.internal.InternalDataSerializer;
import com.gemstone.gemfire.internal.cache.CacheDistributionAdvisor.CacheProfile;
import com.gemstone.gemfire.internal.cache.CacheDistributionAdvisor.InitialImageAdvice;
import com.gemstone.gemfire.internal.cache.partitioned.PRLocallyDestroyedException;
import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor;
import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor.PartitionProfile;
import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberID;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
/**
* This message processor handles the creation and initial exchange of
* {@link CacheDistributionAdvisor.CacheProfile}s when a new
* {@link CacheDistributionAdvisee} is created. The name remains
* CreateRegion* to avoid merge conflicts.
*
* @author Eric Zoerner
*/
public class CreateRegionProcessor implements ProfileExchangeProcessor {
protected CacheDistributionAdvisee newRegion;
/** Creates a new instance of CreateRegionProcessor */
public CreateRegionProcessor(CacheDistributionAdvisee newRegion) {
this.newRegion = newRegion;
}
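// A minimal usage sketch (illustrative, not part of the original file):
// a region under construction exchanges profiles with its peers by
// driving this processor; "newRegion" stands for whatever
// CacheDistributionAdvisee is being initialized.
//
//   CreateRegionProcessor proc = new CreateRegionProcessor(newRegion);
//   proc.initializeRegion();                       // exchange profiles with peers
//   InitialImageAdvice advice = proc.getInitialImageAdvice(null);  // pick GII sources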
/** this method tells other members that the region is being created */
public void initializeRegion() {
InternalDistributedSystem system = this.newRegion.getSystem();
LogWriterI18n logger = system.getLogWriter().convertToLogWriterI18n();
// try 5 times, see CreateRegionMessage#skipDuringInitialization
for (int retry=0; retry<5; retry++) {
Set recps = getRecipients();
logger.fine(LocalizedStrings.CreateRegionProcessor_CREATING_0.toLocalizedString(this.newRegion));
if (recps.isEmpty()) {
logger.fine("CreateRegionProcessor.initializeRegion, no recipients, msg not sent");
this.newRegion.getDistributionAdvisor().setInitialized();
LocalRegion lr = (LocalRegion)this.newRegion;
lr.setProfileExchanged(true);
EventTracker tracker = lr.getEventTracker();
if (tracker != null) {
tracker.setInitialized();
}
return;
}
CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recps);
boolean useMcast = false; // never use multicast for region meta-level ops (can cause hangs)
CreateRegionMessage msg = getCreateRegionMessage(recps, replyProc, useMcast);
// since PR buckets can be created during cache entry operations, enable
// severe alert processing if we're creating one of them
if (((LocalRegion)newRegion).isUsedForPartitionedRegionBucket()) {
replyProc.enableSevereAlertProcessing();
msg.severeAlertCompatible = true;
}
this.newRegion.getDistributionManager().putOutgoing(msg);
// this was in a while() loop, which is incorrect use of a reply processor.
// Reply procs are deregistered when they return from waitForReplies
try {
// Don't allow a region to be created if the distributed system is
// disconnecting
this.newRegion.getCache().getCancelCriterion().checkCancelInProgress(null);
// initialize the VMIdAdvisor if required (if transactions are later
// received from other nodes then it better be in usable shape)
this.newRegion.getSystem().getVMIdAdvisor().handshake();
// This isn't right. We should disable region creation in general, not just
// the remote case here...
// // Similarly, don't allow new regions to be created if the cache is closing
// GemFireCache cache = (GemFireCache)this.newRegion.getCache();
// if (cache.isClosing()) {
// throw new CacheClosedException("Cannot create a region when the cache is closing");
// }
try {
replyProc.waitForRepliesUninterruptibly();
if (!replyProc.needRetry()) {
break;
}
}
catch (ReplyException e) {
Throwable t = e.getCause();
if (t instanceof IllegalStateException) {
// region is incompatible with region in another cache
throw (IllegalStateException)t;
}
e.handleAsUnexpected();
break;
}
} finally {
replyProc.cleanup();
EventTracker tracker = ((LocalRegion)this.newRegion).getEventTracker();
if (tracker != null) {
tracker.setInitialized();
}
if (((LocalRegion)this.newRegion).isUsedForPartitionedRegionBucket()) {
LogWriterI18n log = ((LocalRegion)this.newRegion).getLogWriterI18n();
if (log.fineEnabled()) {
log.fine("initialized bucket event tracker: " + tracker);
}
}
}
} // for (retry loop)
// tell advisor that it has been initialized since a profile exchange occurred
this.newRegion.getDistributionAdvisor().setInitialized();
((LocalRegion)this.newRegion).setProfileExchanged(true);
}
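// The single-use reply-processor pattern relied on above, in isolation
// (illustrative; "MyMessage", "MAX_RETRIES" and "done" are hypothetical).
// A ReplyProcessor21 deregisters itself when waitForReplies* returns, so
// each retry must construct a fresh one rather than reuse the old one:
//
//   for (int retry = 0; retry < MAX_RETRIES; retry++) {
//     ReplyProcessor21 proc = new ReplyProcessor21(system, recipients);
//     MyMessage m = new MyMessage();
//     m.processorId = proc.getProcessorId();   // ties replies to this processor
//     m.setRecipients(recipients);
//     dm.putOutgoing(m);
//     try {
//       proc.waitForRepliesUninterruptibly();  // deregisters the processor
//     } finally {
//       proc.cleanup();
//     }
//     if (done) break;
//   }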
protected Set getRecipients() {
DistributionAdvisee parent = this.newRegion.getParentAdvisee();
Set recps = null;
if (parent == null) { // root region, all recipients
InternalDistributedSystem system = this.newRegion.getSystem();
recps = system.getDistributionManager().getOtherDistributionManagerIds();
}
else {
// get recipients that have the parent region defined as distributed.
recps = getAdvice();
}
return recps;
}
public InitialImageAdvice getInitialImageAdvice(InitialImageAdvice previousAdvice) {
return newRegion.getCacheDistributionAdvisor().adviseInitialImage(previousAdvice);
}
private Set getAdvice() {
if (this.newRegion instanceof BucketRegion) {
return ((BucketRegion)this.newRegion).getBucketAdvisor().adviseProfileExchange();
} else {
DistributionAdvisee rgn = this.newRegion.getParentAdvisee();
DistributionAdvisor advisor = rgn.getDistributionAdvisor();
return advisor.adviseGeneric();
}
}
protected CreateRegionMessage getCreateRegionMessage(Set recps,
ReplyProcessor21 proc, boolean useMcast) {
CreateRegionMessage msg = new CreateRegionMessage();
msg.regionPath = this.newRegion.getFullPath();
msg.profile = (CacheProfile)this.newRegion.getProfile();
msg.processorId = proc.getProcessorId();
msg.concurrencyChecksEnabled = this.newRegion.getAttributes().getConcurrencyChecksEnabled();
msg.setMulticast(useMcast);
msg.setRecipients(recps);
return msg;
}
public void setOnline(InternalDistributedMember target) {
}
class CreateRegionReplyProcessor extends ReplyProcessor21 {
CreateRegionReplyProcessor(Set members) {
super((InternalDistributedSystem)CreateRegionProcessor.this.newRegion.
getCache().getDistributedSystem(),
members);
}
/**
* guards application of event state to the region so that we deserialize
* and apply event state only once
*/
private final Object eventStateLock = new Object();
/** whether event state has been recorded in the region */
private boolean eventStateRecorded = false;
private boolean allMembersSkippedChecks = true;
/**
* true if all members skipped CreateRegionMessage#checkCompatibility(),
* in which case CreateRegionMessage should be retried.
*/
public boolean needRetry() {
return this.allMembersSkippedChecks;
}
@Override
public void process(DistributionMessage msg) {
Assert.assertTrue(msg instanceof CreateRegionReplyMessage,
"CreateRegionProcessor is unable to process message of type " +
msg.getClass());
CreateRegionReplyMessage reply = (CreateRegionReplyMessage)msg;
LocalRegion lr = (LocalRegion)newRegion;
if (lr.getLogWriterI18n().fineEnabled()) {
lr.getLogWriterI18n().fine("CreateRegionProcessor processing " + msg);
}
try {
if (reply.profile != null) {
// #45934 set flags before adding the profile
if (newRegion instanceof DistributedRegion) {
DistributedRegion dr = (DistributedRegion) newRegion;
if (!dr.getDataPolicy().withPersistence() && reply.profile.isPersistent) {
dr.setGeneratedVersionTag(false);
}
}
if (CreateRegionMessage.isLocalAccessor(newRegion) && reply.profile.isPersistent) {
lr.enableConcurrencyChecks();
}
CacheDistributionAdvisor cda = CreateRegionProcessor.this.newRegion.getCacheDistributionAdvisor();
cda.putProfile(reply.profile);
if (reply.bucketProfiles != null) {
RegionAdvisor ra = (RegionAdvisor)cda;
ra.putBucketRegionProfiles(reply.bucketProfiles);
}
if (reply.eventState != null && lr.hasEventTracker()) {
synchronized(eventStateLock) {
if (!this.eventStateRecorded) {
this.eventStateRecorded = true;
Object eventState = reply.eventState;
if (EventTracker.VERBOSE) {
lr.cache.getLoggerI18n().info(
LocalizedStrings.DEBUG,
"Applying reply event state to region " + lr.getName()
+ " from " + reply.getSender());
}
lr.recordEventState(reply.getSender(), (Map)eventState);
}
}
}
reply.eventState = null;
if (lr.isUsedForPartitionedRegionBucket()) {
((BucketRegion)lr).updateEventSeqNum(reply.seqKeyForWan);
}
// Process any delta filter-profile messages received during profile
// exchange.
// The pending messages are queued in the local profile.
FilterProfile remoteFP = reply.profile.filterProfile;
if (remoteFP != null) {
FilterProfile localFP = ((LocalRegion)newRegion).filterProfile;
List messages = localFP.getQueuedFilterProfileMsgs(reply.getSender());
// Thread init level is set since region is used during CQ registration.
int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT);
try {
remoteFP.processQueuedFilterProfileMsgs(messages);
} finally {
LocalRegion.setThreadInitLevelRequirement(oldLevel);
localFP.removeQueuedFilterProfileMsgs(reply.getSender());
}
}
}
if (newRegion instanceof DistributedRegion) {
DistributedRegion dr = (DistributedRegion)newRegion;
if (reply.destroyedId != null) {
dr.getPersistenceAdvisor().removeMember(reply.destroyedId);
}
if (reply.hasActiveTransaction) {
dr.activeTXNodes.add(reply.getSender());
}
}
if (!reply.skippedCompatibilityChecks) {
allMembersSkippedChecks = false;
}
} finally {
// invoke super.process() even in case of exceptions (bug #41556)
if (lr.getLogWriterI18n().fineEnabled()) {
lr.getLogWriterI18n().fine("CreateRegionProcessor invoking super.process()");
}
super.process(msg);
}
}
/**
* IllegalStateException is an anticipated reply exception. Receiving
* multiple replies with this exception is normal.
*/
@Override
protected boolean logMultipleExceptions() {
return false;
}
}
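// The "apply event state at most once" guard used in process() above,
// reduced to its essentials (illustrative sketch, not original code):
//
//   private final Object eventStateLock = new Object();
//   private boolean eventStateRecorded = false;
//
//   void recordOnce(Runnable apply) {
//     synchronized (eventStateLock) {
//       if (!eventStateRecorded) {
//         eventStateRecorded = true;   // first reply wins; later replies are no-ops
//         apply.run();
//       }
//     }
//   }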
public static final class CreateRegionMessage extends HighPriorityDistributionMessage
implements MessageWithReply {
public boolean concurrencyChecksEnabled;
protected String regionPath;
protected CacheProfile profile;
protected int processorId;
private transient boolean incompatible = false;
private transient ReplyException replyException;
private transient LogWriterI18n log;
private transient CacheProfile replyProfile;
private transient ArrayList replyBucketProfiles;
private transient Object eventState;
protected transient boolean severeAlertCompatible;
private transient boolean skippedCompatibilityChecks;
@Override
public int getProcessorId() {
return this.processorId;
}
@Override
public boolean isSevereAlertCompatible() {
return severeAlertCompatible;
}
@Override
public boolean sendViaJGroups() {
return true;
}
@Override
protected void process(DistributionManager dm) {
log = dm.getLoggerI18n();
// set thread-local flag to allow entrance through the initialization latch
int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT);
LocalRegion lclRgn = null;
CacheDistributionAdvisee cda = null;
PersistentMemberID destroyedId = null;
try {
// get the region from the path, but do NOT wait on initialization,
// otherwise we could have a distributed deadlock
GemFireCacheImpl cache = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
// fix for bug 42051: discover any regions that are in the process
// of being destroyed
DistributedRegion destroyingRegion = cache.getRegionInDestroy(this.regionPath);
if (destroyingRegion != null) {
destroyedId = destroyingRegion.getPersistentID();
}
lclRgn = (LocalRegion)cache.getRegion(this.regionPath);
if (lclRgn instanceof CacheDistributionAdvisee) {
// bug 37604 - don't return a profile if this is a bucket and the owner
// has been locally destroyed
if (lclRgn.isUsedForPartitionedRegionBucket()) {
if (!((BucketRegion)lclRgn).isPartitionedRegionOpen()) {
if (log.fineEnabled()) {
log.fine("<Partitioned Region Closed or Locally Destroyed> " + this);
}
return;
}
}
cda = (CacheDistributionAdvisee)lclRgn;
handleCacheDistributionAdvisee(cda, true);
}
else {
if (lclRgn == null) {
// check to see if a ProxyBucketRegion (not a true region) exists
cda = PartitionedRegionHelper.getProxyBucketRegion(cache,
this.regionPath, false);
handleCacheDistributionAdvisee(cda, false);
}
else {
log.fine(" " + this);
}
}
}
catch (PRLocallyDestroyedException fre) {
if (log.fineEnabled()) {
log.fine("<region locally destroyed> " + this);
}
}
catch (RegionDestroyedException e) {
log.fine("<region destroyed> " + this);
}
catch (CancelException e) {
log.fine("<cache closed> " + this);
}
catch (Throwable t) {
Error err;
if (t instanceof Error && SystemFailure.isJVMFailureError(
err = (Error)t)) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
}
// Whenever you catch Error or Throwable, you must also
// check for fatal JVM error (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
if (replyException == null) {
replyException = new ReplyException(t);
}
else {
log.warning(LocalizedStrings.CreateRegionProcessor_MORE_THAN_ONE_EXCEPTION_THROWN_IN__0, this, t);
}
}
finally {
LocalRegion.setThreadInitLevelRequirement(oldLevel);
CreateRegionReplyMessage replyMsg = new CreateRegionReplyMessage();
replyMsg.profile = replyProfile;
replyMsg.bucketProfiles = replyBucketProfiles;
replyMsg.eventState = this.eventState;
replyMsg.destroyedId = destroyedId;
replyMsg.setProcessorId(this.processorId);
replyMsg.setRecipient(this.getSender());
replyMsg.skippedCompatibilityChecks = this.skippedCompatibilityChecks;
// check if there is an active transaction on the region since it will
// send the TX message to the new node even if BucketRegion does not
// exist on this node
if (cda != null) {
// if there are any transactions that have been committed/rolled back
// recently then conservatively mark hasActiveTransaction since a
// commit/rollback may still be on the wire
TXManagerImpl txMgr = (TXManagerImpl)cda.getCache()
.getCacheTransactionManager();
if (txMgr.finishedTXStates.hasTransactions()) {
replyMsg.hasActiveTransaction = true;
}
else {
Collection<TXStateProxy> txns = txMgr.getHostedTransactionsInProgress();
if (!txns.isEmpty()) {
for (TXStateProxy txProxy : txns) {
if (txProxy.hasAffectedRegion(cda)) {
replyMsg.hasActiveTransaction = true;
break;
}
}
}
}
}
if (lclRgn != null && lclRgn.isUsedForPartitionedRegionBucket()) {
replyMsg.seqKeyForWan = ((BucketRegion)lclRgn).getEventSeqNum().get();
}
if (replyException != null && !this.incompatible) {
// no need to log the exception if it was caused by compatibility check
log.fine("While processing {" + this +
"}, got exception, returning to sender", replyException);
}
replyMsg.setException(replyException);
dm.putOutgoing(replyMsg);
if (lclRgn instanceof PartitionedRegion) {
((PartitionedRegion)lclRgn).sendIndexCreationMsg(this.getSender());
}
}
}
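// Save/restore pattern for the thread-local init level used above and in
// CreateRegionReplyProcessor.process() (illustrative): it lets the current
// thread pass the initialization latch of a region that is still
// initializing, avoiding a distributed deadlock.
//
//   int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT);
//   try {
//     // ... work that may touch a not-yet-initialized region ...
//   } finally {
//     LocalRegion.setThreadInitLevelRequirement(oldLevel);
//   }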
/**
* Attempts to process this message with the specified
* CacheDistributionAdvisee.
*
* @param cda the CacheDistributionAdvisee to apply this profile to
* @param isRealRegion true if the CacheDistributionAdvisee is a real region
*/
private void handleCacheDistributionAdvisee(
CacheDistributionAdvisee cda, boolean isRealRegion) {
if (cda == null) {
// local region or proxy bucket region not found
log.fine(" " + this); // matches old logging
return;
}
String errorMsg = null;
if (isRealRegion) {
// only check compatibility if this advisee is a real region
errorMsg = checkCompatibility(cda, this.profile);
}
if (errorMsg != null) {
this.incompatible = true;
this.log.fine(this + " <replyProfile not set because errorMsg=" + errorMsg + ">");
// propagate the incompatibility; on the creating member,
// CreateRegionReplyProcessor rethrows the IllegalStateException cause
this.replyException = new ReplyException(new IllegalStateException(errorMsg));
}
else {
this.replyProfile = (CacheProfile)cda.getProfile();
// the rest of the else branch, which also captures this.eventState and
// this.replyBucketProfiles from the advisee, is elided here, as are
// checkCompatibility() and this class's serialization methods
}
}
}
/**
* Reply to a CreateRegionMessage. Only the members needed by the code
* above are shown; the field list is inferred from that usage, and
* serialization (fromData/toData) is elided.
*/
public static final class CreateRegionReplyMessage extends ReplyMessage {
protected CacheProfile profile;
protected ArrayList bucketProfiles;
protected Object eventState;
protected PersistentMemberID destroyedId;
protected boolean skippedCompatibilityChecks;
protected boolean hasActiveTransaction;
protected long seqKeyForWan;
@Override
public String toString() {
StringBuilder buff = new StringBuilder("CreateRegionReplyMessage (processorId=");
buff.append(getProcessorId());
buff.append("; skippedCompatibilityChecks=");
buff.append(this.skippedCompatibilityChecks);
buff.append("; hasActiveTransaction=");
buff.append(this.hasActiveTransaction);
buff.append("; seqKeyForWan=");
buff.append(this.seqKeyForWan);
if (this.getException() != null) {
buff.append("; with exception {")
.append(getException().getMessage()).append("}");
}
buff.append(")");
return buff.toString();
}
}
}
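// End-to-end incompatibility path (illustrative): when a peer's
// checkCompatibility(...) returns a non-null errorMsg, the peer replies
// with ReplyException(new IllegalStateException(errorMsg)); on the
// creating member, CreateRegionReplyProcessor surfaces that cause and
// initializeRegion() rethrows it to the caller:
//
//   try {
//     new CreateRegionProcessor(newRegion).initializeRegion();
//   } catch (IllegalStateException ise) {
//     // newRegion's attributes are incompatible with an existing
//     // region of the same path on another member
//   }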