/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.internal.cache;
import java.io.PrintStream;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import com.gemstone.gemfire.cache.EvictionAction;
import com.gemstone.gemfire.cache.EvictionAlgorithm;
import com.gemstone.gemfire.compression.Compressor;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.ClassPathLoader;
import com.gemstone.gemfire.internal.CopyOnWriteHashSet;
import com.gemstone.gemfire.internal.cache.DiskInitFile.DiskRegionFlag;
import com.gemstone.gemfire.internal.cache.persistence.DiskRegionView;
import com.gemstone.gemfire.internal.cache.persistence.PersistenceAdvisorImpl;
import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberID;
import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberPattern;
import com.gemstone.gemfire.internal.cache.versions.DiskRegionVersionVector;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionHolder;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
import com.gemstone.gemfire.internal.cache.versions.VersionSource;
import com.gemstone.gemfire.internal.cache.versions.VersionTag;
import com.gemstone.gemfire.internal.concurrent.CustomEntryConcurrentHashMap;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import joptsimple.internal.Strings;
/**
* Code shared by both DiskRegion and RecoveredDiskRegion.
*
* @author Darrel Schneider
*
* @since prPersistSprint2
*/
public abstract class AbstractDiskRegion implements DiskRegionView {
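/**
 * Verbose tracing of the persistent view. Enabled via the
 * "gemfire.TRACE_PERSISTENT_VIEW" system property, or whenever
 * PersistenceAdvisorImpl.TRACE is on.
 */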
public static final boolean TRACE_VIEW = Boolean.getBoolean("gemfire.TRACE_PERSISTENT_VIEW") || PersistenceAdvisorImpl.TRACE;
////////////////////// Instance Fields ///////////////////////
private final DiskStoreImpl ds;
private final Long id;
private long uuid;
private long clearOplogEntryId = DiskStoreImpl.INVALID_ID;
private RegionVersionVector clearRVV;
private byte lruAlgorithm;
private byte lruAction;
private int lruLimit;
private int concurrencyLevel = 16;
private int initialCapacity = 16;
private float loadFactor = 0.75f;
private boolean statisticsEnabled;
private boolean isBucket;
/** True if a persistent backup is needed */
private boolean backup;
/** Additional flags that are persisted to the meta-data. */
private final EnumSet<DiskRegionFlag> flags;
/**
* A flag used to indicate that this disk region
* is being recreated using already existing data on the disk.
*/
private boolean isRecreated;
private boolean configChanged;
private boolean aboutToDestroy;
private boolean aboutToDestroyDataStorage;
private String partitionName;
private int startingBucketId;
private String compressorClassName;
private Compressor compressor;
private boolean enableOffHeapMemory;
private final LogWriterI18n logger;
/**
* Records the version vector of what has been persisted to disk.
* This may lag behind the version vector of what is in memory, because
* updates may be written asynchronously to disk. We need to keep track
* of exactly what has been written to disk so that we can record a version
* vector at the beginning of each oplog.
*
* The version vector of what is in memory is held
* in LocalRegion.versionVector.
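*
* Illustrative example (not taken from the original comment): if this
* member has generated versions up to 10 in memory but only changes
* through version 8 have been flushed to the oplogs so far, this vector
* still records 8 until the asynchronous writes complete.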
*/
private RegionVersionVector versionVector;
/**
* A flag indicating whether the current version vector accurately
* represents what has been written to this member's disk.
*/
private volatile boolean rvvTrusted = true;
public static final long INVALID_UUID = -1L;
/**
* @param ds
* the disk store used for this disk region
* @param name
* the name of this disk region
* @param uuid
* A system-wide unique ID for the region. Buckets of the same region
* can share the same ID. Essentially, the name+uuid combination should
* be unique cluster-wide and uniform across all nodes for a given
* instance of a region created in the cluster. It is currently passed
* for a region via InternalRegionArguments.setUUID and is always zero
* for GemFire regions (#48335). One safe way to generate UUIDs is
* LocalRegion.newUUID.
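*
* Note that, as the body of this constructor shows, if the init file
* already knows a region with this name but a different non-zero UUID,
* the stale on-disk region is destroyed first (#48335) before the new
* one takes its place.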
*/
protected AbstractDiskRegion(DiskStoreImpl ds, String name, long uuid) {
DiskRegionView drv;
if (uuid != 0 && uuid != INVALID_UUID) {
drv = ds.getDiskInitFile().getDiskRegionByName(name);
// if UUID does not match then we know that this is a new region creation
long drvUUID;
if (drv != null && (drvUUID = drv.getUUID()) != 0
&& drvUUID != INVALID_UUID && drvUUID != uuid) {
// explicitly destroy the region (#48335)
ds.destroyRegion(name, false);
}
}
drv = ds.getDiskInitFile().takeDiskRegionByName(name);
if (drv != null) {
// If we found one in the init file, take it out; the region we are
// constructing will replace it in the disk store's drMap.
this.ds = drv.getDiskStore();
this.id = drv.getId();
this.uuid = drv.getUUID();
this.backup = drv.isBackup();
this.clearOplogEntryId = drv.getClearOplogEntryId();
this.clearRVV = drv.getClearRVV();
this.lruAlgorithm = drv.getLruAlgorithm();
this.lruAction = drv.getLruAction();
this.lruLimit = drv.getLruLimit();
this.concurrencyLevel = drv.getConcurrencyLevel();
this.initialCapacity = drv.getInitialCapacity();
this.loadFactor = drv.getLoadFactor();
this.statisticsEnabled = drv.getStatisticsEnabled();
this.isBucket = drv.isBucket();
this.flags = drv.getFlags();
this.partitionName = drv.getPartitionName();
this.startingBucketId = drv.getStartingBucketId();
this.myInitializingId = drv.getMyInitializingID();
this.myInitializedId = drv.getMyPersistentID();
this.aboutToDestroy = drv.wasAboutToDestroy();
this.aboutToDestroyDataStorage = drv.wasAboutToDestroyDataStorage();
this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOnlineMembers());
this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineMembers());
this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineAndEqualMembers());
this.isRecreated = true;
// Use the same atomic counters as the previous disk region. This ensures
// that updates from threads with a reference to the old region update this
// disk region. See bug 49943.
this.numOverflowOnDisk = ((AbstractDiskRegion)drv).numOverflowOnDisk;
this.numEntriesInVM = ((AbstractDiskRegion)drv).numEntriesInVM;
this.numOverflowBytesOnDisk = ((AbstractDiskRegion)drv).numOverflowBytesOnDisk;
this.entries = drv.getRecoveredEntryMap();
this.readyForRecovery = drv.isReadyForRecovery();
this.recoveredEntryCount = drv.getRecoveredEntryCount();
this.recoveryCompleted = ((AbstractDiskRegion)drv).recoveryCompleted;
this.versionVector = drv.getRegionVersionVector();
this.compressorClassName = drv.getCompressorClassName();
this.compressor = drv.getCompressor();
this.enableOffHeapMemory = drv.getEnableOffHeapMemory();
if (drv instanceof PlaceHolderDiskRegion) {
this.setRVVTrusted(((PlaceHolderDiskRegion) drv).getRVVTrusted());
}
} else {
//This is a brand new disk region.
this.ds = ds;
// {
// DiskRegion existingDr = ds.getByName(name);
// if (existingDr != null) {
// throw new IllegalStateException("DiskRegion named " + name + " already exists with id=" + existingDr.getId());
// }
// }
this.id = ds.generateRegionId();
this.flags = EnumSet.noneOf(DiskRegionFlag.class);
this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>();
this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>();
this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>();
this.isRecreated = false;
this.versionVector = new DiskRegionVersionVector(ds.getDiskStoreID());
this.numOverflowOnDisk = new AtomicLong();
this.numEntriesInVM = new AtomicLong();
this.numOverflowBytesOnDisk = new AtomicLong();
}
this.logger = ds.logger;
}
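/**
 * Creates a disk region view known only by its id. Judging from the fields it
 * initializes (isRecreated, backup, a fresh version vector), this constructor
 * appears to serve recovery of region meta-data from an existing disk store.
 */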
protected AbstractDiskRegion(DiskStoreImpl ds, long id) {
this.ds = ds;
this.id = id;
this.flags = EnumSet.noneOf(DiskRegionFlag.class);
this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>();
this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>();
this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>();
this.isRecreated = true;
this.backup = true;
this.versionVector = new DiskRegionVersionVector(ds.getDiskStoreID());
this.logger = ds.logger;
this.numOverflowOnDisk = new AtomicLong();
this.numEntriesInVM = new AtomicLong();
this.numOverflowBytesOnDisk = new AtomicLong();
}
/**
* Used to initialize a PlaceHolderDiskRegion for a region that is being closed
* @param drv the region that is being closed
*/
protected AbstractDiskRegion(DiskRegionView drv) {
this.ds = drv.getDiskStore();
this.id = drv.getId();
this.uuid = drv.getUUID();
this.backup = drv.isBackup();
this.clearOplogEntryId = drv.getClearOplogEntryId();
this.clearRVV = drv.getClearRVV();
this.lruAlgorithm = drv.getLruAlgorithm();
this.lruAction = drv.getLruAction();
this.lruLimit = drv.getLruLimit();
this.concurrencyLevel = drv.getConcurrencyLevel();
this.initialCapacity = drv.getInitialCapacity();
this.loadFactor = drv.getLoadFactor();
this.statisticsEnabled = drv.getStatisticsEnabled();
this.isBucket = drv.isBucket();
this.flags = drv.getFlags();
this.partitionName = drv.getPartitionName();
this.startingBucketId = drv.getStartingBucketId();
this.myInitializingId = null; // fixes 43650
this.myInitializedId = drv.getMyPersistentID();
this.aboutToDestroy = false;
this.aboutToDestroyDataStorage = false;
this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOnlineMembers());
this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineMembers());
this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineAndEqualMembers());
this.isRecreated = true;
this.numOverflowOnDisk = new AtomicLong();
this.numEntriesInVM = new AtomicLong();
this.numOverflowBytesOnDisk = new AtomicLong();
this.entries = drv.getRecoveredEntryMap();
this.readyForRecovery = drv.isReadyForRecovery();
this.recoveredEntryCount = 0; // fix for bug 41570
this.recoveryCompleted = ((AbstractDiskRegion)drv).recoveryCompleted;
this.versionVector = drv.getRegionVersionVector();
this.compressorClassName = drv.getCompressorClassName();
this.compressor = drv.getCompressor();
this.enableOffHeapMemory = drv.getEnableOffHeapMemory();
this.logger = ds.logger;
}
////////////////////// Instance Methods //////////////////////
public abstract String getName();
public final DiskStoreImpl getDiskStore() {
return this.ds;
}
abstract void beginDestroyRegion(LocalRegion region);
public void resetRVV() {
this.versionVector = new DiskRegionVersionVector(ds.getDiskStoreID());
}
public final Long getId() {
return this.id;
}
public final long getUUID() {
return this.uuid;
}
public long getClearOplogEntryId() {
return this.clearOplogEntryId;
}
public void setClearOplogEntryId(long v) {
this.clearOplogEntryId = v;
}
public RegionVersionVector getClearRVV() {
return this.clearRVV;
}
public void setClearRVV(RegionVersionVector rvv) {
this.clearRVV = rvv;
}
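/**
 * Stores the region configuration on this view: eviction settings, map
 * sizing, flags, partition/bucket information, the compressor class name and
 * the off-heap setting. For online disk stores the compressor is instantiated
 * immediately, and when off-heap memory is also enabled the compressor is
 * wired into the cache's off-heap store (see the body below).
 */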
public void setConfig(byte lruAlgorithm, byte lruAction, int lruLimit,
int concurrencyLevel, int initialCapacity,
float loadFactor, boolean statisticsEnabled,
boolean isBucket, EnumSet<DiskRegionFlag> flags,
long uuid, String partitionName, int startingBucketId,
String compressorClassName, boolean enableOffHeapMemory) {
this.lruAlgorithm = lruAlgorithm;
this.lruAction = lruAction;
this.lruLimit = lruLimit;
this.concurrencyLevel = concurrencyLevel;
this.initialCapacity = initialCapacity;
this.loadFactor = loadFactor;
this.statisticsEnabled = statisticsEnabled;
this.isBucket = isBucket;
if (flags != null && flags != this.flags) {
this.flags.clear();
this.flags.addAll(flags);
}
this.uuid = uuid;
this.partitionName = partitionName;
this.startingBucketId = startingBucketId;
this.compressorClassName = compressorClassName;
if (!ds.isOffline()) {
createCompressorFromClassName();
}
this.enableOffHeapMemory = enableOffHeapMemory;
if (this.getCompressor() != null && this.enableOffHeapMemory) {
GemFireCacheImpl gfc = GemFireCacheImpl.getInstance();
if (gfc != null) {
if (gfc.getOffHeapStore() != null) {
gfc.getOffHeapStore().setCompressor(getCompressor());
}
}
}
}
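/**
 * Instantiates the configured compressor from its class name, or clears it
 * when no class name is set. The class must be loadable and have an
 * accessible no-arg constructor; otherwise an IllegalArgumentException is
 * thrown. A hedged usage sketch (the compressor class name below is only an
 * illustrative assumption, not taken from this file):
 *
 * <pre>{@code
 * // setConfig(...) invokes createCompressorFromClassName() when the disk
 * // store is online:
 * dr.setConfig(..., "com.example.MyCompressor", false);
 * }</pre>
 */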
public void createCompressorFromClassName() {
if (Strings.isNullOrEmpty(compressorClassName)) {
compressor = null;
} else {
try {
@SuppressWarnings("unchecked")
Class<Compressor> compressorClass = (Class<Compressor>) ClassPathLoader.getLatest().forName(compressorClassName);
this.compressor = compressorClass.newInstance();
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException(LocalizedStrings.DiskInitFile_UNKNOWN_COMPRESSOR_0_FOUND
.toLocalizedString(compressorClassName), e);
} catch (InstantiationException e) {
throw new IllegalArgumentException(LocalizedStrings.DiskInitFile_UNKNOWN_COMPRESSOR_0_FOUND
.toLocalizedString(compressorClassName), e);
} catch (IllegalAccessException e) {
throw new IllegalArgumentException(LocalizedStrings.DiskInitFile_UNKNOWN_COMPRESSOR_0_FOUND
.toLocalizedString(compressorClassName), e);
}
}
}
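/**
 * Rebuilds an EvictionAttributesImpl from the persisted lruAlgorithm,
 * lruAction and lruLimit bytes, so callers see the eviction configuration the
 * region was saved with.
 */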
public EvictionAttributesImpl getEvictionAttributes() {
return new EvictionAttributesImpl()
.setAlgorithm(getActualLruAlgorithm())
.setAction(getActualLruAction())
.internalSetMaximum(getLruLimit());
}
public byte getLruAlgorithm() {
return this.lruAlgorithm;
}
public EvictionAlgorithm getActualLruAlgorithm() {
return EvictionAlgorithm.parseValue(getLruAlgorithm());
}
public byte getLruAction() {
return this.lruAction;
}
public EvictionAction getActualLruAction() {
return EvictionAction.parseValue(getLruAction());
}
public int getLruLimit() {
return this.lruLimit;
}
public int getConcurrencyLevel() {
return this.concurrencyLevel;
}
public int getInitialCapacity() {
return this.initialCapacity;
}
public float getLoadFactor() {
return this.loadFactor;
}
public boolean getStatisticsEnabled() {
return this.statisticsEnabled;
}
public boolean isBucket() {
return this.isBucket;
}
@Override
public EnumSet<DiskRegionFlag> getFlags() {
return this.flags;
}
public String getPartitionName() {
return this.partitionName;
}
public int getStartingBucketId() {
return this.startingBucketId;
}
public String getPrName() {
assert isBucket();
String bn = PartitionedRegionHelper.getBucketName(getName());
return PartitionedRegionHelper.getPRPath(bn);
}
private PersistentMemberID myInitializingId = null;
private PersistentMemberID myInitializedId = null;
private final CopyOnWriteHashSet<PersistentMemberID> onlineMembers;
private final CopyOnWriteHashSet<PersistentMemberID> offlineMembers;
private final CopyOnWriteHashSet<PersistentMemberID> equalMembers;
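/**
 * Two-phase hand-off of this member's persistent id: this method installs a
 * new initializing id (promoting any previous initializing id to the
 * initialized id), and {@link #markInitialized()} later promotes the
 * initializing id once initialization completes.
 */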
public PersistentMemberID addMyInitializingPMID(PersistentMemberID pmid) {
PersistentMemberID result = this.myInitializingId;
this.myInitializingId = pmid;
if(result != null) {
this.myInitializedId = result;
}
return result;
}
public void markInitialized() {
assert this.myInitializingId != null;
this.myInitializedId = this.myInitializingId;
this.myInitializingId = null;
}
public boolean addOnlineMember(PersistentMemberID pmid) {
return this.onlineMembers.add(pmid);
}
public boolean addOfflineMember(PersistentMemberID pmid) {
return this.offlineMembers.add(pmid);
}
public boolean addOfflineAndEqualMember(PersistentMemberID pmid) {
return this.equalMembers.add(pmid);
}
public boolean rmOnlineMember(PersistentMemberID pmid) {
return this.onlineMembers.remove(pmid);
}
public boolean rmOfflineMember(PersistentMemberID pmid) {
return this.offlineMembers.remove(pmid);
}
public boolean rmEqualMember(PersistentMemberID pmid) {
return this.equalMembers.remove(pmid);
}
public void markBeginDestroyRegion() {
this.aboutToDestroy = true;
}
public void markBeginDestroyDataStorage() {
this.aboutToDestroyDataStorage = true;
}
public void markEndDestroyRegion() {
this.onlineMembers.clear();
this.offlineMembers.clear();
this.equalMembers.clear();
this.myInitializedId = null;
this.myInitializingId = null;
this.aboutToDestroy = false;
this.isRecreated = false;
}
public void markEndDestroyDataStorage() {
this.myInitializedId = null;
this.myInitializingId = null;
this.aboutToDestroyDataStorage = false;
}
// PersistentMemberView methods
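// The getters below synchronize on the DiskInitFile when one is present,
// presumably so that reads of the persistent view do not interleave with
// updates being written through the init file.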
public PersistentMemberID getMyInitializingID() {
DiskInitFile dif = this.ds.getDiskInitFile();
if (dif == null) return this.myInitializingId;
synchronized (dif) {
return this.myInitializingId;
}
}
public PersistentMemberID getMyPersistentID() {
DiskInitFile dif = this.ds.getDiskInitFile();
if (dif == null) return this.myInitializedId;
synchronized (dif) {
return this.myInitializedId;
}
}
public Set<PersistentMemberID> getOnlineMembers() {
DiskInitFile dif = this.ds.getDiskInitFile();
if (dif == null) return this.onlineMembers.getSnapshot();
synchronized (dif) {
return this.onlineMembers.getSnapshot();
}
}
public Set<PersistentMemberID> getOfflineMembers() {
DiskInitFile dif = this.ds.getDiskInitFile();
if (dif == null) return this.offlineMembers.getSnapshot();
synchronized (dif) {
return this.offlineMembers.getSnapshot();
}
}
public Set<PersistentMemberID> getOfflineAndEqualMembers() {
DiskInitFile dif = this.ds.getDiskInitFile();
if (dif == null) return this.equalMembers.getSnapshot();
synchronized (dif) {
return this.equalMembers.getSnapshot();
}
}
public Set<PersistentMemberPattern> getRevokedMembers() {
//DiskInitFile dif = this.ds.getDiskInitFile();
return ds.getRevokedMembers();
}
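// The mutators below delegate the view change to the DiskStoreImpl and, when
// persistent-view tracing is on, log the event against this region's view.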
public void memberOffline(PersistentMemberID persistentID) {
this.ds.memberOffline(this, persistentID);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - member offline " + persistentID);
}
}
public void memberOfflineAndEqual(PersistentMemberID persistentID) {
this.ds.memberOfflineAndEqual(this, persistentID);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - member offline and equal " + persistentID);
}
}
public void memberOnline(PersistentMemberID persistentID) {
this.ds.memberOnline(this, persistentID);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - member online " + persistentID);
}
}
public void memberRemoved(PersistentMemberID persistentID) {
this.ds.memberRemoved(this, persistentID);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - member removed " + persistentID);
}
}
public void memberRevoked(PersistentMemberPattern revokedPattern) {
this.ds.memberRevoked(revokedPattern);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - member revoked " + revokedPattern);
}
}
public void setInitializing(PersistentMemberID newId) {
this.ds.setInitializing(this, newId);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - initializing local id: " + getMyInitializingID());
}
}
public void setInitialized() {
this.ds.setInitialized(this);
if(TRACE_VIEW || logger.fineEnabled()) {
logger.info(LocalizedStrings.DEBUG, "PersistentView " + getDiskStoreID().abbrev() + " - " + this.getName() + " - initialized local id: " + getMyPersistentID());
}
}
public PersistentMemberID generatePersistentID() {
return this.ds.generatePersistentID(this);
}
public boolean isRecreated() {
return this.isRecreated;
}
public boolean hasConfigChanged() {
return this.configChanged;
}
public void setConfigChanged(boolean v) {
this.configChanged = v;
}
public void endDestroy(LocalRegion region) {
//Clean up the state if we were ready to recover this region
if(isReadyForRecovery()) {
ds.updateDiskRegion(this);
this.entriesMapIncompatible = false;
if (this.entries != null) {
CustomEntryConcurrentHashMap