/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.internal.cache;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import com.gemstone.gemfire.CancelCriterion;
import com.gemstone.gemfire.CancelException;
import com.gemstone.gemfire.DataSerializer;
import com.gemstone.gemfire.Instantiator;
import com.gemstone.gemfire.cache.DiskAccessException;
import com.gemstone.gemfire.cache.EvictionAction;
import com.gemstone.gemfire.cache.EvictionAlgorithm;
import com.gemstone.gemfire.cache.RegionAttributes;
import com.gemstone.gemfire.cache.RegionDestroyedException;
import com.gemstone.gemfire.compression.Compressor;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.FileUtil;
import com.gemstone.gemfire.internal.HeapDataOutputStream;
import com.gemstone.gemfire.internal.InternalDataSerializer;
import com.gemstone.gemfire.internal.InternalInstantiator;
import com.gemstone.gemfire.internal.InternalInstantiator.InstantiatorAttributesHolder;
import com.gemstone.gemfire.internal.cache.persistence.CanonicalIdHolder;
import com.gemstone.gemfire.internal.cache.persistence.DiskExceptionHandler;
import com.gemstone.gemfire.internal.cache.persistence.DiskInitFileInterpreter;
import com.gemstone.gemfire.internal.cache.persistence.DiskInitFileParser;
import com.gemstone.gemfire.internal.cache.persistence.DiskRegionView;
import com.gemstone.gemfire.internal.cache.persistence.DiskStoreID;
import com.gemstone.gemfire.internal.cache.persistence.PRPersistentConfig;
import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberID;
import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberPattern;
import com.gemstone.gemfire.internal.cache.versions.DiskRegionVersionVector;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionHolder;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.shared.Version;
import com.gemstone.gnu.trove.TIntHashSet;
import com.gemstone.gnu.trove.TLongHashSet;
import com.gemstone.gnu.trove.TLongIterator;
import io.snappydata.collection.IntObjectHashMap;
/**
* Does all the IF file work for a DiskStoreImpl.
*
* @author Darrel Schneider
*
* @since prPersistSprint1
*/
public class DiskInitFile implements DiskInitFileInterpreter {
public static final String IF_FILE_EXT = ".if";
/** Indicates the end of valid records in the oplog. Note that if this
* is set to something other than zero, openRAF2 should change to
* initialize the byte array we write to the file.
*/
public static final byte IF_EOF_ID = 0;
public static final byte END_OF_RECORD_ID = 21;
static final int OPLOG_FILE_ID_REC_SIZE = 1+8+1;
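// 1 byte opcode + 8 byte oplogId + 1 byte END_OF_RECORD_ID (see writeIFRecord(byte, long))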
/**
* Written to IF
* Byte Format:
* 8: leastSigBits of UUID
* 8: mostSigBits of UUID
* 1: EndOfRecordMarker
*/
public static final byte IFREC_DISKSTORE_ID = 56;
/**
* Written to IF
* Byte Format:
* 4: instantiatorID
* 4: classNameLength
* classNameLength: className bytes
* 4: instClassNameLength
* instClassNameLength: instClassName bytes
* 1: EndOfRecordMarker
*/
public static final byte IFREC_INSTANTIATOR_ID = 57;
/**
* Written to IF
* Byte Format:
* 4: classNameLength
* classNameLength: className bytes
* 1: EndOfRecordMarker
*/
public static final byte IFREC_DATA_SERIALIZER_ID = 58;
/**
* Written to IF
* Used to say that persistent member id is online.
* Byte Format:
* RegionId
* 4: blobLength
* blobLength: member bytes
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_ONLINE_MEMBER_ID = 59;
/**
* Written to IF
* Used to say that persistent member id is offline.
* Byte Format:
* RegionId
* 4: blobLength
* blobLength: member bytes
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_OFFLINE_MEMBER_ID = 60;
/**
* Written to IF
* Used to say that persistent member id no longer exists.
* Byte Format:
* RegionId
* 4: blobLength
* blobLength: member bytes
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_RM_MEMBER_ID = 61;
/**
* Written to IF.
* Used to record the persistent member id of this file.
* Byte Format:
* RegionId
* 4: blobLength
* blobLength: member bytes
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_MY_MEMBER_INITIALIZING_ID = 62;
/**
* Written to IF.
* Used to record that the previous my member id completed initialization.
* Byte Format:
* RegionId
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_MY_MEMBER_INITIALIZED_ID = 63;
/**
* Written to IF.
* Used to record create of region
* Byte Format:
* RegionId
* 4: nameLength
* nameLength: name bytes
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_CREATE_REGION_ID = 64;
/**
* Written to IF.
* Used to record begin of destroy of region
* Byte Format:
* RegionId
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_BEGIN_DESTROY_REGION_ID = 65;
/**
* Written to IF.
* Used to record clear of region
* Byte Format:
* RegionId
* 8: oplogEntryId
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_CLEAR_REGION_ID = 66;
/**
* Written to IF.
* Used to record the end of a destroy region.
* Byte Format:
* RegionId
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_END_DESTROY_REGION_ID = 67;
/**
* Written to IF.
* Used to record that a region is about to be partially destroyed
* Byte Format:
* RegionId
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID = 68;
/**
* Written to IF.
* Used to record that a region is partially destroyed
* Byte Format:
* RegionId
* 1: EndOfRecordMarker
* @since prPersistSprint1
*/
public static final byte IFREC_END_PARTIAL_DESTROY_REGION_ID = 69;
/**
* Records the creation of an oplog crf file
* Byte Format:
* 8: oplogId
* 1: EndOfRecord
*/
public static final byte IFREC_CRF_CREATE = 70;
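// For illustration: the CRF-create record for oplogId 5 is written by
// writeIFRecord(IFREC_CRF_CREATE, 5) as the ten bytes
// [70, 0, 0, 0, 0, 0, 0, 0, 5, 21] (opcode, big-endian long, END_OF_RECORD_ID).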
/**
* Records the creation of an oplog drf file
* Byte Format:
* 8: oplogId
* 1: EndOfRecord
*/
public static final byte IFREC_DRF_CREATE = 71;
/**
* Records the deletion of an oplog crf file
* Byte Format:
* 8: oplogId
* 1: EndOfRecord
*/
public static final byte IFREC_CRF_DELETE = 72;
/**
* Records the deletion of an oplog drf file
* Byte Format:
* 8: oplogId
* 1: EndOfRecord
*/
public static final byte IFREC_DRF_DELETE = 73;
/**
* Written to IF.
* Used to record regions config
* Byte Format:
* RegionId
* 1: lruAlgorithm
* 1: lruAction
* 4: lruLimit (int) // no need to ObjectSize during
* recovery since all data is in blob form
* 4: concurrencyLevel (int)
* 4: initialCapacity (int)
* 4: loadFactor (float)
* 1: statisticsEnabled (boolean)
* 1: isBucket (boolean)
* 1: hasRedundantCopy (boolean) (GEMFIREXD only)
* 1: deferRecovery (boolean) (GemFireXD only)
* 1: EndOfRecordMarker
*
* Used to read the region configuration for 6.5 disk stores.
*
*/
public static final byte IFREC_REGION_CONFIG_ID = 74;
/**
* Written to IF
* Used to say that persistent member id is offline and has the same data on disk as this member
* Byte Format:
* RegionId
* 4: blobLength
* blobLength: member bytes
* 1: EndOfRecordMarker
* @since prPersistSprint3
*/
public static final byte IFREC_OFFLINE_AND_EQUAL_MEMBER_ID = 75;
/**
* Written to IF.
* Used to record regions config
* Byte Format:
* RegionId
* 1: lruAlgorithm
* 1: lruAction
* 4: lruLimit (int)
* // no need to ObjectSize during recovery since all data is in blob form
* 4: concurrencyLevel (int)
* 4: initialCapacity (int)
* 4: loadFactor (float)
* 1: statisticsEnabled (boolean)
* 1: isBucket (boolean)
* 1: hasRedundantCopy (boolean) (GEMFIREXD only)
* 1: deferRecovery (boolean) (GemFireXD only)
* 4: length of partitionName String bytes (int)
* length:actual bytes
* 4: startingBucketId(int)
* 1: EndOfRecordMarker
*/
public static final byte IFREC_REGION_CONFIG_ID_66 = 76;
/**
* Records the creation of an oplog krf file
* The presence of this record indicates that the krf file
* is complete.
* Byte Format:
* 8: oplogId
* 1: EndOfRecord
*/
public static final byte IFREC_KRF_CREATE = 77;
/**
* Records the creation of a persistent partitioned
* region configuration.
* Byte Format:
* variable: pr name
* 4: total num buckets
* variable: colocated with
* 1: EndOfRecord
*/
public static final byte IFREC_PR_CREATE = 78;
/**
* Records the deletion of persistent partitioned
* region.
* Byte Format:
* variable: pr name
* 1: EndOfRecord
*/
public static final byte IFREC_PR_DESTROY = 79;
/**
* Maps a member id (either a disk store ID or a distributed system id
* plus a byte) to a single integer, which can be used in oplogs.
*
* Byte Format:
* 4: the number assigned to this id.
* variable: serialized object representing the ID.
* 1: EndOfRecord
*/
public static final byte IFREC_ADD_CANONICAL_MEMBER_ID = 80;
/**
* Written to IF
* Used to say that a disk store has been revoked
* Byte Format:
* variable: a PersistentMemberPattern
* @since 7.0
*/
public static final byte IFREC_REVOKE_DISK_STORE_ID = 81;
/**
* Written to IF. Records the gemfire version.
* Byte Format:
* 1: version byte from Version.GFE_CURRENT.ordinal
* 1: EndOfRecord
* @since 7.0
*/
public static final byte IFREC_GEMFIRE_VERSION = 82;
/**
* Written to IF.
* Used to record a clear of a region using an RVV
* Byte Format:
* RegionId
* variable: serialized RVV
* 1: EndOfRecordMarker
* @since 7.0
*/
public static final byte IFREC_CLEAR_REGION_WITH_RVV_ID = 83;
/**
* Written to IF.
* Used to record regions config
* Byte Format:
* RegionId
* 1: lruAlgorithm
* 1: lruAction
* 4: lruLimit (int) // no need to ObjectSize during recovery since all data is in blob form
* 4: concurrencyLevel (int)
* 4: initialCapacity (int)
* 4: loadFactor (float)
* 1: statisticsEnabled (boolean)
* 1: isBucket (boolean)
* variable: compressorClassName
* 1: enableOffHeapMemory (boolean)
* 1: EndOfRecordMarker
*
*/
public static final byte IFREC_REGION_CONFIG_ID_75 = 88;
/**
* Written to IF.
* Used to record regions config
* Byte Format:
* RegionId
* 1: lruAlgorithm
* 1: lruAction
* 4: lruLimit (int)
* // no need to ObjectSize during recovery since all data is in blob form
* 4: concurrencyLevel (int)
* 4: initialCapacity (int)
* 4: loadFactor (float)
* 1: statisticsEnabled (boolean)
* 1: isBucket (boolean)
* 1: hasRedundantCopy (boolean)
* 1: deferRecovery (boolean)
* 8: uuid (long)
* 4: length of partitionName String bytes (int)
* length:actual bytes
* 4: startingBucketId(int)
* 1: EndOfRecordMarker
*/
public static final byte IFREC_REGION_CONFIG_ID_71 = 91;
/**
* Written to the IF. Records the successful
* creation of an IRF file.
*/
public static final byte IFREC_IRF_CREATE = 92;
/**
* Written to the IF. Records the deletion of an IRF file due to a
* read failure, a corrupt file, etc.
*/
public static final byte IFREC_IRF_DELETE = 93;
/**
* When an index is created then its corresponding ddlId is written
* to the IF file. The idea is that if an index is dropped and
* re-created with a different definition then the persisted
* irf can be validated.
*/
public static final byte IFREC_INDEX_CREATE = 94;
/**
* When an index is deleted then its corresponding ddlId is written
* to the IF file. The idea is that if an index is dropped then
* the persisted irf can be ignored.
*/
public static final byte IFREC_INDEX_DELETE = 95;
private final DiskStoreImpl parent;
private final File ifFile;
private RandomAccessFile ifRAF;
private boolean closed;
// contains the ids of dataSerializers already written to IF
private final TIntHashSet dsIds;
// contains the ids of instantiators already written to IF
private final TIntHashSet instIds;
private final TLongHashSet crfIds;
private final TLongHashSet drfIds;
private final TLongHashSet krfIds;
private final TLongHashSet irfIds;
private final HashSet<String> createdIndexIds;
private final HashSet<String> deletedIndexIds;
/**
* Map used to keep track of regions we know of from the DiskInitFile
* but that do not yet exist (they have not yet been recovered or they have been closed).
*/
private final Map<Long, PlaceHolderDiskRegion> drMap = new HashMap<Long, PlaceHolderDiskRegion>();
private final Map<String, PlaceHolderDiskRegion> drMapByName = new HashMap<String, PlaceHolderDiskRegion>();
/**
* Map of persistent partitioned regions configurations that are stored in this
* init file.
*/
private final Map<String, PRPersistentConfig> prMap =
new HashMap<String, PRPersistentConfig>();
private final InternalDataSerializer.RegistrationListener regListener;
private int ifLiveRecordCount = 0;
private int ifTotalRecordCount = 0;
private boolean compactInProgress;
// the recovered version
private Version gfversion;
/**
* Used to calculate the highest oplog entry id we have seen
* in a clear entry.
*/
private long clearOplogEntryIdHWM = DiskStoreImpl.INVALID_ID;
/**
* Container for canonical ids held in the disk store. Member ids
* are canonicalized so they can be written as an integer in the
* oplogs.
*/
private final CanonicalIdHolder canonicalIdHolder = new CanonicalIdHolder();
/**
* Set of members that have been revoked. We keep track of the revoked
* members so that we can indicate to the user a member has been revoked,
* rather than simply conflicting
*/
private final Set<PersistentMemberPattern> revokedMembers = new HashSet<PersistentMemberPattern>();
/**
* Lock used to synchronize access to the init file.
* This is a lock rather than a synchronized block
* because the backup tool needs to acquire this lock.
*/
private final BackupLock lock = new BackupLock();
transient private long nextSeekPosition;
transient private boolean gotEOF;
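/**
 * If a temporary init file is present at startup then a previous compaction
 * crashed before completing. The temp file holds the complete pre-compaction
 * contents, so any (possibly partial) non-temp file is discarded and the
 * temp file is renamed back into place.
 */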
private void recoverFromFailedCompaction() {
File tmpFile = getTempFile();
if (tmpFile.exists()) {
// if the temp init file exists then we must have crashed during a compaction.
// In this case we need to destroy the non temp file and rename the temp file.
if (this.ifFile.exists()) {
if (!this.ifFile.delete()) {
throw new IllegalStateException("Could not delete " + this.ifFile);
}
}
if (!tmpFile.renameTo(this.ifFile)) {
throw new IllegalStateException("Could not rename " + tmpFile
+ " to " + this.ifFile);
}
}
}
public DiskStoreImpl getDiskStore() {
return this.parent;
}
public Version currentRecoveredGFVersion() {
return this.gfversion;
}
DiskStoreID recover() {
recoverFromFailedCompaction();
if (!this.ifFile.exists()) {
// nothing to recover
// Instead of calling randomUUID which uses SecureRandom which can be slow
// return UUID.randomUUID();
// create a UUID using the cheaper Random class.
// [sumedh] performance does not matter here, so stick with randomUUID()
return new DiskStoreID(UUID.randomUUID());
}
DiskStoreID result = null;
try {
FileInputStream fis = null;
CountingDataInputStream dis = null;
try {
fis = new FileInputStream(this.ifFile);
dis = new CountingDataInputStream(new BufferedInputStream(fis, 8 * 1024), this.ifFile.length());
DiskInitFileParser parser = new DiskInitFileParser(dis, this, getLogger());
result = parser.parse();
this.gotEOF = parser.gotEOF();
this.nextSeekPosition = dis.getCount();
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: liveRecordCount="
+ this.ifLiveRecordCount
+ " totalRecordCount="
+ this.ifTotalRecordCount);
}
}
finally {
if (dis != null) {
dis.close();
}
if (fis != null) {
fis.close();
}
}
for (PlaceHolderDiskRegion drv: this.drMap.values()) {
if (drv.getMyPersistentID() != null
|| drv.getMyInitializingID() != null) {
// Prepare each region we found in the init file for early recovery.
if (drv.isBucket() || !getDiskStore().getOwnedByRegion()) {
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG preparing for early recovery " + drv);
if (drv.isBucket() && !drv.getActualLruAlgorithm().isNone()) {
drv.prlruStats = getDiskStore().getOrCreatePRLRUStats(drv);
}
getDiskStore().getStats().incUncreatedRecoveredRegions(1);
drv.setRecoveredEntryMap(RegionMapFactory.createVM(drv,
getDiskStore(), getDiskStore().getInternalRegionArguments()));
if (!getDiskStore().isOffline()) {
// schedule it for recovery since we want to recovery region data early now
getDiskStore().scheduleForRecovery(drv);
}
// else if we are validating or offlineCompacting
// then the scheduleForRecovery is called later in DiskStoreImpl
// this helps fix bug 42043
}
}
}
}
catch (EOFException ex) {
// ignore since a partial record write can be caused by a crash
// throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
// .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
}
catch (ClassNotFoundException ex) {
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
.toLocalizedString(this.ifFile.getPath()), ex, this.parent);
}
catch (IOException ex) {
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
.toLocalizedString(this.ifFile.getPath()), ex, this.parent);
}
catch (CancelException ignore) {
if (getLogger().fineEnabled()) {
getLogger().fine("Oplog::readOplog:Error in recovery as Cache was closed",
ignore);
}
}
catch (RegionDestroyedException ignore) {
if (getLogger().fineEnabled()) {
getLogger().fine(
"Oplog::readOplog:Error in recovery as Region was destroyed",
ignore);
}
}
catch (IllegalStateException ex) {
if (!this.parent.isClosing()) {
throw ex;
}
}
return result;
}
public void cmnClearRegion(long drId, long clearOplogEntryId) {
DiskRegionView drv = getDiskRegionById(drId);
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile IFREC_CLEAR_REGION_ID drId=" + drId + " clearOplogEntryId=" + clearOplogEntryId);
if (drv.getClearOplogEntryId() == DiskStoreImpl.INVALID_ID) {
this.ifLiveRecordCount++;
}
// otherwise previous clear is cancelled so don't change liveRecordCount
this.ifTotalRecordCount++;
drv.setClearOplogEntryId(clearOplogEntryId);
if(clearOplogEntryId > clearOplogEntryIdHWM) {
clearOplogEntryIdHWM = clearOplogEntryId;
}
}
public void cmnClearRegion(long drId, ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
DiskRegionView drv = getDiskRegionById(drId);
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile IFREC_CLEAR_REGION_ID drId=" + drId + " clearOplogEntryId=" + clearOplogEntryId);
if (drv.getClearRVV() == null) {
this.ifLiveRecordCount++;
}
// otherwise previous clear is cancelled so don't change liveRecordCount
this.ifTotalRecordCount++;
DiskStoreID ownerId = parent.getDiskStoreID();
// Create a fake RVV for clear purposes. We only need the memberToVersion information.
RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
RegionVersionVector<DiskStoreID> rvv = new DiskRegionVersionVector(ownerId,
memberToVersion, ownerVersion, new ConcurrentHashMap<DiskStoreID, Long>(), 0L, false,
ownerExceptions);
drv.setClearRVV(rvv);
}
private int liveRegions = 0; // added for bug 41618
public boolean hasLiveRegions() {
lock.lock(false);
try {
return this.liveRegions > 0;
} finally {
lock.unlock();
}
}
public void cmnCreateRegion(long drId, String regName) {
recoverDiskRegion(drId, regName);
this.liveRegions++;
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnRegionConfig(long drId, byte lruAlgorithm, byte lruAction, int lruLimit,
int concurrencyLevel, int initialCapacity,
float loadFactor, boolean statisticsEnabled,
boolean isBucket, EnumSet<DiskRegionFlag> flags,
long uuid, String partitionName, int startingBucketId,
String compressorClassName, boolean enableOffHeapMemory) {
DiskRegionView dr = getDiskRegionById(drId);
if (dr != null) {
GemFireCacheImpl.StaticSystemCallbacks sysCb =
GemFireCacheImpl.FactoryStatics.systemCallbacks;
//We need to add the IS_WITH_VERSIONING to persistent regions
//during the upgrade. Previously, all regions had versioning enabled
//but now only regions that have this flag will have versioning enabled.
//
// We don't want gateway queues to turn on versioning. Unfortunately, the only
// way to identify that a region is a gateway queue is by the region
// name.
if(Version.GFE_75.compareTo(currentRecoveredGFVersion()) > 0
&& !dr.getName().contains("_SERIAL_GATEWAY_SENDER_QUEUE")
&& !dr.getName().contains("_PARALLEL_QUEUE_")
&& sysCb == null) {
flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
}
dr.setConfig(lruAlgorithm, lruAction, lruLimit,
concurrencyLevel, initialCapacity, loadFactor,
statisticsEnabled, isBucket, flags, uuid,
partitionName, startingBucketId,
compressorClassName, enableOffHeapMemory);
// Just count this as a live record even though it is possible
// that we have an extra one due to the config changing while
// we were offline.
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
public boolean cmnPRCreate(String name, PRPersistentConfig config) {
if(this.prMap.put(name, config) == null) {
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
this.liveRegions++;
return true;
}
return false;
}
public void cmnGemfireVersion(Version version) {
this.gfversion = version;
}
public boolean cmnPRDestroy(String name) {
if(this.prMap.remove(name) != null) {
this.ifLiveRecordCount--;
this.ifTotalRecordCount++;
this.liveRegions--;
return true;
}
return false;
}
@Override
public void cmnAddCanonicalMemberId(int id, Object object) {
this.canonicalIdHolder.addMapping(id, object);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnRmMemberId(long drId, PersistentMemberID pmid) {
DiskRegionView dr = getDiskRegionById(drId);
// if (dr == null) {
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile no region for drId=" + drId);
// }
if (dr != null) {
if (!dr.rmOnlineMember(pmid)) {
if(!dr.rmOfflineMember(pmid)) {
dr.rmEqualMember(pmid);
}
}
// since we removed a member don't inc the live count
// In fact decrement it by one since both this record
// and the previous one are both garbage.
this.ifLiveRecordCount--;
this.ifTotalRecordCount++;
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
public void cmnOfflineMemberId(long drId, PersistentMemberID pmid) {
DiskRegionView dr = getDiskRegionById(drId);
if (dr != null) {
dr.addOfflineMember(pmid);
if (dr.rmOnlineMember(pmid) || dr.rmEqualMember(pmid)) {
this.ifLiveRecordCount--;
}
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
public void cmdOfflineAndEqualMemberId(long drId, PersistentMemberID pmid) {
DiskRegionView dr = getDiskRegionById(drId);
if (dr != null) {
if (this.parent.isUpgradeVersionOnly(this) &&
Version.GFE_70.compareTo(currentRecoveredGFVersion()) > 0) {
dr.addOnlineMember(pmid);
if (dr.rmOfflineMember(pmid)) {
this.ifLiveRecordCount--;
}
} else {
dr.addOfflineAndEqualMember(pmid);
if (dr.rmOnlineMember(pmid) || dr.rmOfflineMember(pmid)) {
this.ifLiveRecordCount--;
}
}
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
public void cmnOnlineMemberId(long drId, PersistentMemberID pmid) {
DiskRegionView dr = getDiskRegionById(drId);
if (dr != null) {
dr.addOnlineMember(pmid);
if (dr.rmOfflineMember(pmid) || dr.rmEqualMember(pmid)) {
this.ifLiveRecordCount--;
}
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
public void cmnDataSerializerId(Class dsc) {
if (dsc != null) {
DataSerializer ds = InternalDataSerializer.register(dsc, /*dsId,*/ true);
this.dsIds.add(ds.getId());
}
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnInstantiatorId(int id, Class c, Class ic) {
if (c != null && ic != null) {
InternalInstantiator.register(c, ic, id, true);
this.instIds.add(id);
}
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnInstantiatorId(int id, String cn, String icn) {
if (cn != null && icn != null) {
InternalInstantiator.register(cn, icn, id, true);
this.instIds.add(id);
}
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnCrfCreate(long oplogId) {
this.crfIds.add(oplogId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnDrfCreate(long oplogId) {
this.drfIds.add(oplogId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnKrfCreate(long oplogId) {
this.krfIds.add(oplogId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnIrfCreate(long oplogId) {
this.irfIds.add(oplogId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnIrfDelete(long oplogId) {
this.irfIds.remove(oplogId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnIndexCreate(String indexId) {
this.createdIndexIds.add(indexId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public void cmnIndexDelete(String indexId) {
this.deletedIndexIds.add(indexId);
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
public boolean cmnCrfDelete(long oplogId) {
if(this.krfIds.remove(oplogId)) {
this.ifLiveRecordCount--;
this.ifTotalRecordCount++;
}
if (this.crfIds.remove(oplogId)) {
this.ifLiveRecordCount--;
this.ifTotalRecordCount++;
return true;
} else {
return false;
}
}
public boolean cmnDrfDelete(long oplogId) {
if (this.drfIds.remove(oplogId)) {
this.ifLiveRecordCount--;
this.ifTotalRecordCount++;
return true;
} else {
return false;
}
}
public boolean isCRFOplogIdPresent(long crfId) {
return this.crfIds.contains(crfId);
}
public boolean isDRFOplogIdPresent(long drfId) {
return this.drfIds.contains(drfId);
}
public void verifyOplogs(TLongHashSet foundCrfs, TLongHashSet foundDrfs) {
verifyOplogs(foundCrfs, foundDrfs, this.crfIds, this.drfIds);
}
public void verifyOplogs(TLongHashSet foundCrfs, TLongHashSet foundDrfs, TLongHashSet expectedCrfIds, TLongHashSet expectedDrfIds) {
TLongHashSet missingCrfs = calcMissing(foundCrfs, expectedCrfIds);
TLongHashSet missingDrfs = calcMissing(foundDrfs, expectedDrfIds);
// Note that finding extra ones is ok; it is possible we died just
// after creating one but before we could record it in the if file
// Or died just after deleting it but before we could record it in the if file.
boolean failed = false;
String msg = null;
if (!missingCrfs.isEmpty()) {
failed = true;
msg = "*.crf files with these ids: "
+ Arrays.toString(missingCrfs.toArray());
}
if (!missingDrfs.isEmpty()) {
failed = true;
if (msg == null) {
msg = "";
} else {
msg += ", ";
}
msg += "*.drf files with these ids: "
+ Arrays.toString(missingDrfs.toArray());
}
if (failed) {
msg = "The following required files could not be found: " + msg + ".";
throw new IllegalStateException(msg);
}
}
private TLongHashSet calcMissing(TLongHashSet found, TLongHashSet expected) {
TLongHashSet missing = new TLongHashSet(expected.toArray());
missing.removeAll(found.toArray());
return missing;
}
boolean hasKrf(long oplogId) {
return krfIds.contains(oplogId);
}
boolean hasIrf(long oplogId) {
return irfIds.contains(oplogId);
}
DiskRegionView takeDiskRegionByName(String name) {
lock.lock(false);
try {
DiskRegionView result = this.drMapByName.remove(name);
if (result != null) {
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: takeDiskRegionByName found=" + name, new RuntimeException("STACK"));
this.drMap.remove(result.getId());
// } else {
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: takeDiskRegionByName DID NOT find=" + name, new RuntimeException("STACK"));
}
return result;
} finally {
lock.unlock();
}
}
Map<Long, PlaceHolderDiskRegion> getDRMap() {
lock.lock(false);
try {
return new HashMap<Long, PlaceHolderDiskRegion>(drMap);
} finally {
lock.unlock();
}
}
DiskRegion createDiskRegion(DiskStoreImpl dsi, String name,
boolean isBucket, boolean isPersistBackup,
boolean overflowEnabled, boolean isSynchronous,
DiskRegionStats stats, CancelCriterion cancel,
DiskExceptionHandler exceptionHandler,
RegionAttributes ra, EnumSet<DiskRegionFlag> flags,
long uuid,
String partitionName, int startingBucketId,
Compressor compressor, boolean enableOffHeapMemory) {
lock.lock(false);
try {
// need to call the constructor and addDiskRegion while synced
DiskRegion result = new DiskRegion(dsi, name, isBucket, isPersistBackup,
overflowEnabled, isSynchronous,
stats, cancel, exceptionHandler, ra, flags, uuid,
partitionName, startingBucketId,
compressor == null ? null : compressor.getClass().getName(), enableOffHeapMemory);
dsi.addDiskRegion(result);
return result;
} finally {
lock.unlock();
}
}
DiskRegionView getDiskRegionByName(String name) {
lock.lock(false);
try {
return this.drMapByName.get(name);
} finally {
lock.unlock();
}
}
DiskRegionView getDiskRegionByPrName(String name) {
lock.lock(false);
try {
for (PlaceHolderDiskRegion dr: this.drMapByName.values()) {
if (dr.isBucket()) {
if (name.equals(dr.getPrName())) {
return dr;
}
}
}
return null;
} finally {
lock.unlock();
}
}
private DiskRegionView getDiskRegionById(Long drId) {
DiskRegionView result = this.drMap.get(drId);
if (result == null) {
result = this.parent.getById(drId);
}
return result;
}
private void recoverDiskRegion(long drId, String regName) {
// We remember in the DiskStore whatever the last create-region drId
// we see. Note that this could be a region that is destroyed
// (we will not know until we see a later destroy region record)
// but since drId's can wrap around into negative numbers, whatever
// the last one we see is the right one to remember.
this.parent.recoverRegionId(drId);
PlaceHolderDiskRegion dr = new PlaceHolderDiskRegion(this.parent, drId, regName);
Object old = this.drMap.put(drId, dr);
assert old == null;
PlaceHolderDiskRegion oldDr = this.drMapByName.put(regName, dr);
if (oldDr != null) {
this.drMap.remove(oldDr.getId()); // fix for bug 42043
}
// don't schedule for recovery until we know it was not destroyed
}
/**
* Maximum number of bytes used to encode a DiskRegion id.
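* That is one length-marker byte plus up to eight payload bytes; see putDiskRegionID.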
*/
static final int DR_ID_MAX_BYTES = 9;
private void writeIFRecord(byte b, DiskRegionView dr) {
assert lock.isHeldByCurrentThread();
try {
ByteBuffer bb = getIFWriteBuffer(1+DR_ID_MAX_BYTES+1);
bb.put(b);
putDiskRegionID(bb, dr.getId());
bb.put(END_OF_RECORD_ID);
writeIFRecord(bb, false); // don't do stats for these small records
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void writeIFRecord(byte b, DiskRegionView dr, long v) {
assert lock.isHeldByCurrentThread();
try {
ByteBuffer bb = getIFWriteBuffer(1+DR_ID_MAX_BYTES+8+1);
bb.put(b);
putDiskRegionID(bb, dr.getId());
bb.putLong(v);
bb.put(END_OF_RECORD_ID);
writeIFRecord(bb, false); // don't do stats for these small records
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void writeIFRecord(byte b, long v) {
assert lock.isHeldByCurrentThread();
try {
ByteBuffer bb = getIFWriteBuffer(OPLOG_FILE_ID_REC_SIZE);
bb.put(b);
bb.putLong(v);
bb.put(END_OF_RECORD_ID);
writeIFRecord(bb, false); // don't do stats for these small records
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void writeIFRecord(byte b, DiskRegionView dr, String s) {
assert lock.isHeldByCurrentThread();
try {
int hdosSize = 1+DR_ID_MAX_BYTES+estimateByteSize(s)+1;
if (hdosSize < 32) {
hdosSize = 32;
}
HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
hdos.write(b);
writeDiskRegionID(hdos, dr.getId());
hdos.writeUTF(s);
hdos.write(END_OF_RECORD_ID);
writeIFRecord(hdos, true);
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void writeIFRecord(byte b, String s) {
assert lock.isHeldByCurrentThread();
try {
int hdosSize = 1 + estimateByteSize(s) + 1;
if (hdosSize < 32) {
hdosSize = 32;
}
HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize,
Version.CURRENT);
hdos.write(b);
hdos.writeUTF(s);
hdos.write(END_OF_RECORD_ID);
writeIFRecord(hdos, true);
} catch (IOException ex) {
DiskAccessException dae = new DiskAccessException(
LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0
.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void writeIFRecord(byte b, long regionId, String fileName, Object compactorInfo) {
assert lock.isHeldByCurrentThread();
try {
int hdosSize = 1+DR_ID_MAX_BYTES+estimateByteSize(fileName)+1;
if (hdosSize < 32) {
hdosSize = 32;
}
HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
hdos.write(b);
writeDiskRegionID(hdos, regionId);
hdos.writeUTF(fileName);
// TODO - plumb the correct compactor info to this point, to optimize
// serialization
DataSerializer.writeObject(compactorInfo, hdos);
hdos.write(END_OF_RECORD_ID);
writeIFRecord(hdos, true);
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void writeIFRecord(byte b, long regionId, String fileName) {
assert lock.isHeldByCurrentThread();
try {
int hdosSize = 1+DR_ID_MAX_BYTES+estimateByteSize(fileName)+1;
if (hdosSize < 32) {
hdosSize = 32;
}
HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
hdos.write(b);
writeDiskRegionID(hdos, regionId);
hdos.writeUTF(fileName);
hdos.write(END_OF_RECORD_ID);
writeIFRecord(hdos, true);
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
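/**
 * Conservative upper bound on what writeUTF will produce: modified UTF-8
 * uses at most three bytes per char, and the extra char covers the
 * two-byte length header.
 */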
private int estimateByteSize(String s) {
return s == null ? 0 : ((s.length()+1)*3);
}
private void writePMIDRecord(byte opcode, DiskRegionView dr, PersistentMemberID pmid, boolean doStats) {
assert lock.isHeldByCurrentThread();
try {
byte[] pmidBytes = pmidToBytes(pmid);
ByteBuffer bb = getIFWriteBuffer(1+DR_ID_MAX_BYTES+4+pmidBytes.length+1);
bb.put(opcode);
putDiskRegionID(bb, dr.getId());
bb.putInt(pmidBytes.length);
bb.put(pmidBytes);
bb.put(END_OF_RECORD_ID);
writeIFRecord(bb, doStats);
} catch (IOException ex) {
DiskAccessException dae
= new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
if (!this.compactInProgress) {
this.parent.handleDiskAccessException(dae, true);
}
throw dae;
}
}
private void putDiskRegionID(ByteBuffer bb, long drId) {
// If the drId is <= 255 (max unsigned byte) then
// encode it as a single byte.
// Otherwise write a byte whose value is the number of bytes
// it will be encoded by and then follow it with that many bytes.
// Note that drIds are not allowed to have a value in the range 1..8 inclusive.
if (drId >= 0 && drId <= 255) {
bb.put((byte)drId);
} else {
byte bytesNeeded = (byte)Oplog.bytesNeeded(drId);
bb.put(bytesNeeded);
byte[] bytes = new byte[bytesNeeded];
for (int i=bytesNeeded-1; i >=0; i--) {
bytes[i] = (byte)(drId & 0xFF);
drId >>=8;
}
bb.put(bytes);
}
}
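// Encoding example (for illustration, for putDiskRegionID above and
// writeDiskRegionID below): drId 200 fits in an unsigned byte and is written
// as the single byte 0xC8; drId 0x1234 does not fit, so it is written as a
// length marker followed by big-endian payload: [0x02, 0x12, 0x34]. The
// reserved range 1..8 is what lets a reader tell a length marker apart from
// a small id.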
static void writeDiskRegionID(DataOutput dos, long drId) throws IOException {
// If the drId is <= 255 (max unsigned byte) then
// encode it as a single byte.
// Otherwise write a byte whose value is the number of bytes
// it will be encoded by and then follow it with that many bytes.
// Note that drIds are not allowed to have a value in the range 1..8 inclusive.
if (drId >= 0 && drId <= 255) {
dos.write((byte) drId);
} else {
byte bytesNeeded = (byte)Oplog.bytesNeeded(drId);
dos.write(bytesNeeded);
byte[] bytes = new byte[bytesNeeded];
for (int i=bytesNeeded-1; i >=0; i--) {
bytes[i] = (byte)(drId & 0xFF);
drId >>=8;
}
dos.write(bytes);
}
}
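// Decoding example for readDiskRegionID below (for illustration), assuming
// MIN/MAX_RESERVED_DRID span 1..8 as noted above: [0x02, 0x12, 0x34] yields
// 0x1234 (the leading byte is a length marker; the first payload byte is
// sign extended, the rest are OR'ed in), while a leading 0xC8 (200) is
// outside the reserved range and is returned directly as the id.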
static long readDiskRegionID(DataInput dis) throws IOException {
int bytesToRead = dis.readUnsignedByte();
if (bytesToRead <= DiskStoreImpl.MAX_RESERVED_DRID
&& bytesToRead >= DiskStoreImpl.MIN_RESERVED_DRID) {
long result = dis.readByte(); // we want to sign extend this first byte
bytesToRead--;
while (bytesToRead > 0) {
result <<= 8;
result |= dis.readUnsignedByte(); // no sign extension
bytesToRead--;
}
return result;
} else {
return bytesToRead;
}
}
private void cmnAddMyInitializingPMID(DiskRegionView dr, PersistentMemberID pmid) {
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: AddMyInitializingPMID dr=" + dr);
if (dr != null) {
if (dr.addMyInitializingPMID(pmid) == null) {
this.ifLiveRecordCount++;
}
this.ifTotalRecordCount++;
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
private void cmnMarkInitialized(DiskRegionView dr) {
// getLogger().info(LocalizedStrings.DEBUG, "DEBUG: markInitialized dr=" + dr);
// dec since this initializeId is overriding a previous one
// It actually doesn't override myInitializing
//this.ifLiveRecordCount--;
// don't count this as a record in the totalRecCount
if (dr != null) {
dr.markInitialized();
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
private void cmnBeginDestroyRegion(DiskRegionView dr) {
// don't count it; it is a small record
if (dr != null) {
dr.markBeginDestroyRegion();
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
private void cmnEndDestroyRegion(DiskRegionView dr) {
// Figure out how many other records this freed up.
if (dr != null) {
if (dr.getClearOplogEntryId() != DiskStoreImpl.INVALID_ID) {
// one for the clear record
this.ifLiveRecordCount--;
}
// one for each online member
this.ifLiveRecordCount -= dr.getOnlineMembers().size();
// one for each offline member
this.ifLiveRecordCount -= dr.getOfflineMembers().size();
// one for each equal member
this.ifLiveRecordCount -= dr.getOfflineAndEqualMembers().size();
// one for the CREATE_REGION
this.ifLiveRecordCount--;
// one for the region's memberId
if (dr.getMyPersistentID() != null) {
this.ifLiveRecordCount--;
}
this.liveRegions--;
this.drMap.remove(dr.getId());
this.drMapByName.remove(dr.getName());
this.parent.rmById(dr.getId());
dr.markEndDestroyRegion();
} else {
if (DiskStoreImpl.TRACE_RECOVERY) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
} else {
throw new IllegalStateException("bad disk region id");
}
}
}
private void cmnBeginPartialDestroyRegion(DiskRegionView dr) {
// count the begin as both live and total
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
dr.markBeginDestroyDataStorage();
}
private void cmnEndPartialDestroyRegion(DiskRegionView dr) {
// no need to count this small record
// Figure out how many other records this freed up.
if (dr.getClearOplogEntryId() != DiskStoreImpl.INVALID_ID) {
// one for the clear record
this.ifLiveRecordCount--;
}
// Figure out how many other records this freed up.
if (dr.getMyPersistentID() != null) {
// one for the region's memberId
this.ifLiveRecordCount--;
}
dr.markEndDestroyDataStorage();
}
/**
* Write the specified instantiator to the file.
*/
private void saveInstantiator(Instantiator inst) {
saveInstantiator(inst.getId(), inst.getClass().getName(), inst
.getInstantiatedClass().getName());
}
private void saveInstantiator(int id, String instantiatorClassName,
String instantiatedClassName) {
lock.lock();
try {
if (!this.compactInProgress && this.instIds.contains(id)) {
// instantiator already written to disk so just return
return;
}
final byte[] classNameBytes = classNameToBytes(instantiatorClassName);
final byte[] instClassNameBytes = classNameToBytes(instantiatedClassName);
ByteBuffer bb = getIFWriteBuffer(1 + 4
+ 4 + classNameBytes.length
+ 4 + instClassNameBytes.length
+ 1);
bb.put(IFREC_INSTANTIATOR_ID);
bb.putInt(id);
bb.putInt(classNameBytes.length);
bb.put(classNameBytes);
bb.putInt(instClassNameBytes.length);
bb.put(instClassNameBytes);
bb.put(END_OF_RECORD_ID);
writeIFRecord(bb);
}
catch (IOException ex) {
throw new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_SAVING_INSTANTIATOR_TO_DISK_BECAUSE_0.toLocalizedString(ex), this.parent);
} finally {
lock.unlock();
}
}
private void saveInstantiators() {
Object[] objects = InternalInstantiator
.getInstantiatorsForSerialization();
for (Object obj : objects) {
if (obj instanceof Instantiator) {
saveInstantiator((Instantiator)obj);
} else {
InstantiatorAttributesHolder iah = (InstantiatorAttributesHolder)obj;
saveInstantiator(iah.getId(), iah.getInstantiatorClassName(),
iah.getInstantiatedClassName());
}
}
}
/**
* Returns the bytes used to represent a class in an oplog.
*/
static private byte[] classToBytes(Class c) {
return classNameToBytes(c.getName());
}
/**
* Returns the bytes used to represent a class in an oplog.
*/
static private byte[] classNameToBytes(String cn) {
return cn.getBytes(); // use default encoder
}
/**
* Write the specified DataSerializer to the file.
*/
private void saveDataSerializer(DataSerializer ds) {
lock.lock();
try {
if (!this.compactInProgress && this.dsIds.contains(ds.getId())) {
// dataSerializer already written to disk so just return
return;
}
final byte[] classNameBytes = classToBytes(ds.getClass());
ByteBuffer bb = getIFWriteBuffer(1 + 4 + classNameBytes.length + 1);
bb.put(IFREC_DATA_SERIALIZER_ID);
bb.putInt(classNameBytes.length);
bb.put(classNameBytes);
bb.put(END_OF_RECORD_ID);
writeIFRecord(bb);
}
catch (IOException ex) {
throw new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_SAVING_DATA_SERIALIZER_TO_DISK_BECAUSE_0.toLocalizedString(ex), this.parent);
} finally {
lock.unlock();
}
}
private void saveDataSerializers() {
DataSerializer[] dataSerializers = InternalDataSerializer.getSerializers();
for (int i = 0; i < dataSerializers.length; i++) {
saveDataSerializer(dataSerializers[i]);
}
}
private void saveGemfireVersion() {
if (this.gfversion == null) {
this.gfversion = Version.CURRENT;
}
writeGemfireVersion(this.gfversion);
}
private void stopListeningForDataSerializerChanges() {
if (this.regListener != null) {
InternalDataSerializer.removeRegistrationListener(this.regListener);
}
}
private LogWriterI18n getLogger() {
return this.parent.logger;
}
public long getMaxRecoveredClearEntryId() {
return clearOplogEntryIdHWM;
}
private ByteBuffer getIFWriteBuffer(int size) {
return ByteBuffer.allocate(size);
}
private void writeIFRecord(ByteBuffer bb) throws IOException {
writeIFRecord(bb, true);
}
private void writeIFRecord(ByteBuffer bb, boolean doStats)
throws IOException
{
assert lock.isHeldByCurrentThread();
if (this.closed) {
throw new DiskAccessException("The disk store is closed", parent);
}
// if (this.closed) {
// throw new DiskAccessException("Init file is closed!", parent);
// }
this.ifRAF.write(bb.array(), 0, bb.position());
if (DiskStoreImpl.TRACE_WRITES) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_WRITES: DiskInitFile writeIFRecord bb[0] = "
+ bb.array()[0]
// , new RuntimeException("STACK")
);
}
if (doStats) {
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
compactIfNeeded();
}
private void writeIFRecord(HeapDataOutputStream hdos, boolean doStats) throws IOException
{
assert lock.isHeldByCurrentThread();
if (this.closed) {
throw new DiskAccessException("The disk store is closed", parent);
}
hdos.sendTo(this.ifRAF);
if (DiskStoreImpl.TRACE_WRITES) {
getLogger().info(LocalizedStrings.DEBUG, "TRACE_WRITES: DiskInitFile writeIFRecord HDOS"
// , new RuntimeException("STACK")
);
}
if (doStats) {
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
compactIfNeeded();
}
/**
* If the file is smaller than this constant then it
* does not need to be compacted.
*/
private static final long MIN_SIZE_BEFORE_COMPACT = 1024 * 1024;
/**
 * If the ratio of live records to total records is greater than this
 * constant then there is no need to compact.
 */
private static final double COMPACT_RATIO = 0.5;
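// For illustration: a 2 MB init file holding 100 live records out of 300
// total has a live ratio of about 0.33 <= COMPACT_RATIO, so compactIfNeeded()
// triggers a compaction; a file under 1 MB, or one whose records are all
// live, never does.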
private void compactIfNeeded() {
lock.lock(false);
try {
if (this.compactInProgress) return;
if (this.ifTotalRecordCount == 0) return;
if (this.ifTotalRecordCount == this.ifLiveRecordCount) return;
if (this.ifRAF.length() <= MIN_SIZE_BEFORE_COMPACT) return;
if ((double)this.ifLiveRecordCount / (double)this.ifTotalRecordCount > COMPACT_RATIO) return;
compact();
} catch (IOException ignore) {
return;
} finally {
lock.unlock();
}
}
private File getTempFile() {
return new File(this.ifFile.getAbsolutePath()+"tmp");
}
public File getIFFile() {
return this.ifFile;
}
private void compact() {
lock.lock(false);
this.compactInProgress = true;
try {
try {
this.ifRAF.close();
} catch (IOException ignore) {
}
// rename the old file to tmpFile
File tmpFile = getTempFile();
if (this.ifFile.renameTo(tmpFile)) {
boolean success = false;
try {
// create the new file
openRAF();
// fill the new file with data
writeLiveData();
success = true;
// delete the old file
if (!tmpFile.delete()) {
throw new DiskAccessException("could not delete temporary file " + tmpFile, this.parent);
}
} catch (DiskAccessException ignore) {
getLogger().warning(LocalizedStrings.DEBUG, "Exception compacting init file " + this, ignore);
} finally {
if(!success) {
//if we failed
// close the new one and delete it
try {
this.ifRAF.close();
} catch (IOException ignore2) {
}
if (!this.ifFile.delete()) {
throw new DiskAccessException("could not delete file " + this.ifFile, this.parent);
}
if(!tmpFile.renameTo(this.ifFile)) {
throw new DiskAccessException("could not rename file " + tmpFile + " to " + this.ifFile, this.parent);
}
// reopen the old file since we couldn't write the new one
openRAF();
// reset the counts to 0 so we will try a compaction again
// in the future but not right away.
this.ifLiveRecordCount = 0;
this.ifTotalRecordCount = 0;
}
}
} else {
// reopen the old file since we couldn't rename it
openRAF();
// reset the counts to 0 so we will try a compaction again
// in the future but not right away.
this.ifLiveRecordCount = 0;
this.ifTotalRecordCount = 0;
}
} finally {
this.compactInProgress = false;
lock.unlock();
}
}
public void copyTo(File targetDir) throws IOException {
lock.lock(false);
try {
FileUtil.copy(this.ifFile, targetDir);
} finally {
lock.unlock();
}
}
private void openRAF() {
if (DiskStoreImpl.PREALLOCATE_IF) {
openRAF2();
return;
}
try {
this.ifRAF = new RandomAccessFile(this.ifFile, getFileMode());
long len = this.ifRAF.length();
if (len != 0) {
this.ifRAF.seek(len);
}
} catch (IOException ex) {
throw new DiskAccessException(
LocalizedStrings.DiskRegion_COULD_NOT_OPEN_0.toLocalizedString(this.ifFile
.getPath()), ex, this.parent);
}
}
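/**
 * "rwd" makes RandomAccessFile write file content synchronously on every
 * update; plain "rw" leaves flushing to the operating system.
 */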
protected String getFileMode() {
return DiskStoreImpl.SYNC_IF_WRITES || parent.getSyncWrites() ? "rwd" : "rw";
}
private void openRAF2() {
try {
this.ifRAF = new RandomAccessFile(this.ifFile, getFileMode());
long len = this.ifRAF.length();
if (len != 0) {
// this.ifRAF.seek(len);
if (this.gotEOF) {
this.ifRAF.seek(this.nextSeekPosition - 1);
}
else {
this.ifRAF.seek(this.nextSeekPosition);
}
}
else {
// pre-allocate the if file using some percentage of max Oplog size but
// with max of 10M and min of 1M
long maxSizeInMB = Math.min(
Math.max(this.parent.getMaxOplogSize() / 200L, 1L), 10L);
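// For illustration, assuming getMaxOplogSize() is in megabytes (as the
// variable name suggests): a 1024 MB max oplog size gives 1024 / 200 = 5,
// so 5 MB is pre-allocated; smaller stores hit the 1 MB floor, very large
// ones the 10 MB cap.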
byte[] buffer = new byte[(1024 * 1024)];
for (int i = 0; i < maxSizeInMB; i++) {
this.ifRAF.write(buffer);
}
this.ifRAF.seek(0L);
}
} catch (IOException ex) {
throw new DiskAccessException(
LocalizedStrings.DiskRegion_COULD_NOT_OPEN_0.toLocalizedString(this.ifFile
.getPath()), ex, this.parent);
}
}
/**
* Write all live data to the init file
*/
private void writeLiveData() {
lock.lock(false);
try {
this.ifLiveRecordCount = 0;
this.ifTotalRecordCount = 0;
writeDiskStoreId();
saveGemfireVersion();
saveInstantiators();
saveDataSerializers();
saveCrfIds();
saveDrfIds();
saveKrfIds();
saveIrfIds();
for (DiskRegionView drv: this.drMap.values()) {
writeLiveData(drv);
}
for (DiskRegionView drv: this.parent.getDiskRegions()) {
writeLiveData(drv);
}
savePRConfigs();
saveCanonicalIds();
saveRevokedMembers();
getLogger().info(LocalizedStrings.DEBUG, "After compacting init file lrc=" + this.ifLiveRecordCount + " trc=" + this.ifTotalRecordCount);
} finally {
lock.unlock();
}
}
private void saveCrfIds() {
for (TLongIterator i = this.crfIds.iterator(); i.hasNext(); ) {
writeIFRecord(IFREC_CRF_CREATE, i.next());
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
}
private void saveDrfIds() {
for (TLongIterator i = this.drfIds.iterator(); i.hasNext(); ) {
writeIFRecord(IFREC_DRF_CREATE, i.next());
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
}
private void saveKrfIds() {
for (TLongIterator i = this.krfIds.iterator(); i.hasNext(); ) {
writeIFRecord(IFREC_KRF_CREATE, i.next());
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
}
private void saveIrfIds() {
for (TLongIterator i = this.irfIds.iterator(); i.hasNext(); ) {
writeIFRecord(IFREC_IRF_CREATE, i.next());
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
}
private void savePRConfigs() {
for (Map.Entry<String, PRPersistentConfig> entry : prMap.entrySet()) {
writePRCreate(entry.getKey(), entry.getValue());
this.ifLiveRecordCount++;
this.ifTotalRecordCount++;
}
}
private void saveCanonicalIds() {
IntObjectHashMap