/*
 * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */
package com.gemstone.gemfire.internal.cache;

import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

import com.gemstone.gemfire.CancelCriterion;
import com.gemstone.gemfire.CancelException;
import com.gemstone.gemfire.DataSerializer;
import com.gemstone.gemfire.Instantiator;
import com.gemstone.gemfire.cache.DiskAccessException;
import com.gemstone.gemfire.cache.EvictionAction;
import com.gemstone.gemfire.cache.EvictionAlgorithm;
import com.gemstone.gemfire.cache.RegionAttributes;
import com.gemstone.gemfire.cache.RegionDestroyedException;
import com.gemstone.gemfire.compression.Compressor;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.FileUtil;
import com.gemstone.gemfire.internal.HeapDataOutputStream;
import com.gemstone.gemfire.internal.InternalDataSerializer;
import com.gemstone.gemfire.internal.InternalInstantiator;
import com.gemstone.gemfire.internal.InternalInstantiator.InstantiatorAttributesHolder;
import com.gemstone.gemfire.internal.cache.persistence.*;
import com.gemstone.gemfire.internal.cache.versions.DiskRegionVersionVector;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionHolder;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.shared.Version;
import com.gemstone.gnu.trove.TIntHashSet;
import com.gemstone.gnu.trove.TLongHashSet;
import com.gemstone.gnu.trove.TLongIterator;
import org.eclipse.collections.impl.map.mutable.primitive.IntObjectHashMap;

/**
 * Does all the IF file work for a DiskStoreImpl.
 *
 * @author Darrel Schneider
 *
 * @since prPersistSprint1
 */
public class DiskInitFile implements DiskInitFileInterpreter {
  public static final String IF_FILE_EXT = ".if";

  /** Indicates the end of valid records in the oplog. Note that if this
   * is set to something other than zero, openRAF2 should change to
   * initialize the byte array we write to the file.
   */
  public static final byte IF_EOF_ID = 0;
  public static final byte END_OF_RECORD_ID = 21;
  static final int OPLOG_FILE_ID_REC_SIZE = 1+8+1;
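
  // Layout sketch behind OPLOG_FILE_ID_REC_SIZE (1+8+1): one opcode byte
  // (e.g. IFREC_CRF_CREATE), eight bytes of oplogId, and one trailing
  // END_OF_RECORD_ID byte; this is the record shape written by
  // writeIFRecord(byte, long) below.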

  /**
   * Written to IF
   * Byte Format:
   *  8: leastSigBits of UUID
   *  8: mostSigBits of UUID
   *  1: EndOfRecordMarker
   */
  public static final byte IFREC_DISKSTORE_ID = 56;
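
  // Illustrative read-side sketch for an IFREC_DISKSTORE_ID record, mirroring
  // the byte format above (hypothetical local names; the parser consumes the
  // opcode byte before a record body like this is read):
  //   long lsb = dis.readLong();   // 8: leastSigBits of UUID
  //   long msb = dis.readLong();   // 8: mostSigBits of UUID
  //   dis.readByte();              // 1: END_OF_RECORD_ID
  //   DiskStoreID recovered = new DiskStoreID(new UUID(msb, lsb));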
  
  /**
   * Written to IF
   * Byte Format:
   *  4: instantiatorID
   *  4: classNameLength
   *  classNameLength: className bytes
   *  4: instClassNameLength
   *  instClassNameLength: instClassName bytes
   *  1: EndOfRecordMarker
   */
  public static final byte IFREC_INSTANTIATOR_ID = 57;

  /**
   * Written to IF
   * Byte Format:
   *  4: classNameLength
   *  classNameLength: className bytes
   *  1: EndOfRecordMarker
   */
  public static final byte IFREC_DATA_SERIALIZER_ID = 58;
  /**
   * Written to IF
   * Used to say that a persistent member id is online.
   * Byte Format:
   *   RegionId
   *   4: blobLength
   *   blobLength: member bytes
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_ONLINE_MEMBER_ID = 59;
  /**
   * Written to IF
   * Used to say that a persistent member id is offline.
   * Byte Format:
   *   RegionId
   *   4: blobLength
   *   blobLength: member bytes
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_OFFLINE_MEMBER_ID = 60;
  /**
   * Written to IF
   * Used to say that a persistent member id no longer exists.
   * Byte Format:
   *   RegionId
   *   4: blobLength
   *   blobLength: member bytes
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_RM_MEMBER_ID = 61;
  /**
   * Written to IF.
   * Used to record the persistent member id of this file.
   * Byte Format:
   *   RegionId
   *   4: blobLength
   *   blobLength: member bytes
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_MY_MEMBER_INITIALIZING_ID = 62;
  /**
   * Written to IF.
   * Used to record that the previous "my member id" completed initialization.
   * Byte Format:
   *   RegionId
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_MY_MEMBER_INITIALIZED_ID = 63;

  /**
   * Written to IF.
   * Used to record create of region
   * Byte Format:
   *   RegionId
   *   4: nameLength
   *   nameLength: name bytes
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_CREATE_REGION_ID = 64;
  /**
   * Written to IF.
   * Used to record begin of destroy of region
   * Byte Format:
   *   RegionId
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_BEGIN_DESTROY_REGION_ID = 65;
  /**
   * Written to IF.
   * Used to record clear of region
   * Byte Format:
   *   RegionId
   *   8: oplogEntryId
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_CLEAR_REGION_ID = 66;
  /**
   * Written to IF.
   * Used to record the end of a destroy region.
   * Byte Format:
   *   RegionId
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_END_DESTROY_REGION_ID = 67;
  /**
   * Written to IF.
   * Used to record that a region is about to be partially destroyed
   * Byte Format:
   *   RegionId
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID = 68;
  /**
   * Written to IF.
   * Used to record that a region is partially destroyed
   * Byte Format:
   *   RegionId
   *   1: EndOfRecordMarker
   * @since prPersistSprint1
   */
  public static final byte IFREC_END_PARTIAL_DESTROY_REGION_ID = 69;

  /**
   * Records the creation of an oplog crf file
   * Byte Format:
   * 8: oplogId
   * 1: EndOfRecord
   */
  public static final byte IFREC_CRF_CREATE = 70;

  /**
   * Records the creation of an oplog drf file
   * Byte Format:
   * 8: oplogId
   * 1: EndOfRecord
   */
  public static final byte IFREC_DRF_CREATE = 71;

  /**
   * Records the deletion of an oplog crf file
   * Byte Format:
   * 8: oplogId
   * 1: EndOfRecord
   */
  public static final byte IFREC_CRF_DELETE = 72;

  /**
   * Records the deletion of an oplog drf file
   * Byte Format:
   * 8: oplogId
   * 1: EndOfRecord
   */
  public static final byte IFREC_DRF_DELETE = 73;

  /**
   * Written to IF.
   * Used to record a region's config
   * Byte Format:
   *   RegionId
   *   1: lruAlgorithm
   *   1: lruAction
   *   4: lruLimit (int)
   *   // no need to ObjectSize during recovery since all data is in blob form
   *   4: concurrencyLevel (int)
   *   4: initialCapacity (int)
   *   4: loadFactor (float)
   *   1: statisticsEnabled (boolean)
   *   1: isBucket (boolean)
   *   1: hasRedundantCopy (boolean) (GemFireXD only)
   *   1: deferRecovery (boolean) (GemFireXD only)
   *   1: EndOfRecordMarker
   *
   * Used to read the region configuration for 6.5 disk stores.
   */
  public static final byte IFREC_REGION_CONFIG_ID = 74;
  
  /**
   * Written to IF
   * Used to say that a persistent member id is offline and has the same data on disk as this member
   * Byte Format:
   *   RegionId
   *   4: blobLength
   *   blobLength: member bytes
   *   1: EndOfRecordMarker
   * @since prPersistSprint3
   */
  public static final byte IFREC_OFFLINE_AND_EQUAL_MEMBER_ID = 75;

  /**
   * Written to IF.
   * Used to record a region's config
   * Byte Format:
   *   RegionId
   *   1: lruAlgorithm
   *   1: lruAction
   *   4: lruLimit (int)
   *   // no need to ObjectSize during recovery since all data is in blob form
   *   4: concurrencyLevel (int)
   *   4: initialCapacity (int)
   *   4: loadFactor (float)
   *   1: statisticsEnabled (boolean)
   *   1: isBucket (boolean)
   *   1: hasRedundantCopy (boolean) (GemFireXD only)
   *   1: deferRecovery (boolean) (GemFireXD only)
   *   4: length of partitionName String bytes (int)
   *   length:actual bytes 
   *   4: startingBucketId(int)
   *   1: EndOfRecordMarker
   */
  public static final byte IFREC_REGION_CONFIG_ID_66 = 76;

  /**
   * Records the creation of an oplog krf file
   * The presence of this record indicates that the krf file
   * is complete.
   * Byte Format:
   * 8: oplogId
   * 1: EndOfRecord
   */
  public static final byte IFREC_KRF_CREATE = 77;
  
  /**
   * Records the creation of a persistent partitioned
   * region configuration.
   * Byte Format:
   * variable: pr name
   * 4: total num buckets
   * variable: colocated with
   * 1: EndOfRecord
   */
  public static final byte IFREC_PR_CREATE = 78;
  
  /**
   * Records the deletion of persistent partitioned
   * region.
   * Byte Format:
   * variable: pr name
   * 1: EndOfRecord
   */
  public static final byte IFREC_PR_DESTROY = 79;
  
  
  /**
   * Maps a member id (either a disk store ID or a distributed system id
   * plus a byte) to a single integer, which can be used in oplogs.
   * 
   * Byte Format:
   * 4: the number assigned to this id.
   * variable: serialized object representing the ID.
   * 1: EndOfRecord
   */
  public static final byte IFREC_ADD_CANONICAL_MEMBER_ID = 80;
  /**
   * Written to IF
   * Used to say that a disk store has been revoked
   * Byte Format:
   *   variable: a PersistentMemberPattern
   * @since 7.0
   */
  public static final byte IFREC_REVOKE_DISK_STORE_ID = 81;
  
  /**
   * Written gemfire version to IF
   * Byte Format:
   * 1: version byte from Version.GFE_CURRENT.ordinal
   * 1: EndOfRecord
   * @since 7.0
   */
  public static final byte IFREC_GEMFIRE_VERSION = 82;

  /**
   * Written to IF.
   * Used to record a clear of a region using an RVV
   * Byte Format:
   *   RegionId
   *   variable: serialized RVV
   *   1: EndOfRecordMarker
   * @since 7.0
   */
  public static final byte IFREC_CLEAR_REGION_WITH_RVV_ID = 83;

  /**
   * Written to IF.
   * Used to record a region's config
   * Byte Format:
   *   RegionId
   *   1: lruAlgorithm
   *   1: lruAction
   *   4: lruLimit (int)
   *   // no need to ObjectSize during recovery since all data is in blob form
   *   4: concurrencyLevel (int)
   *   4: initialCapacity (int)
   *   4: loadFactor (float)
   *   1: statisticsEnabled (boolean)
   *   1: isBucket (boolean)
   *   variable: compressorClassName
   *   1: enableOffHeapMemory (boolean)
   *   1: EndOfRecordMarker
   */
  public static final byte IFREC_REGION_CONFIG_ID_75 = 88;
  
  /**
   * Written to IF.
   * Used to record a region's config
   * Byte Format:
   *   RegionId
   *   1: lruAlgorithm
   *   1: lruAction
   *   4: lruLimit (int)
   *   // no need to ObjectSize during recovery since all data is in blob form
   *   4: concurrencyLevel (int)
   *   4: initialCapacity (int)
   *   4: loadFactor (float)
   *   1: statisticsEnabled (boolean)
   *   1: isBucket (boolean)
   *   1: hasRedundantCopy (boolean)
   *   1: deferRecovery (boolean)
   *   8: uuid (long)
   *   4: length of partitionName String bytes (int)
   *   length:actual bytes 
   *   4: startingBucketId(int)
   *   1: EndOfRecordMarker
   */
  public static final byte IFREC_REGION_CONFIG_ID_71 = 91;

  /**
   * Written to the IF. Records the successful creation of an IRF file.
   */
  public static final byte IFREC_IRF_CREATE = 92;

  /**
   * Written to the IF. Records the deletion of an IRF file, e.g. due to
   * a read failure or a corrupt file.
   */
  public static final byte IFREC_IRF_DELETE = 93;

  /**
   * When an index is created, write the corresponding ddlId
   * to the IF file. The idea is that if an index is dropped and
   * re-created with a different definition then the persisted
   * irf can be validated.
   */
  public static final byte IFREC_INDEX_CREATE = 94;

  /**
   * When an index is deleted, write the corresponding ddlId
   * to the IF file. The idea is that if an index is dropped then
   * the persisted irf can be ignored.
   */
  public static final byte IFREC_INDEX_DELETE = 95;

  private final DiskStoreImpl parent;
  
  private final File ifFile;
  private RandomAccessFile ifRAF;
  private boolean closed;
  // contains the ids of dataSerializers already written to IF
  private final TIntHashSet dsIds;
  // contains the ids of instantiators already written to IF
  private final TIntHashSet instIds;

  private final TLongHashSet crfIds;
  private final TLongHashSet drfIds;
  private final TLongHashSet krfIds;
  private final TLongHashSet irfIds;

  private final HashSet<String> createdIndexIds;
  private final HashSet<String> deletedIndexIds;

  /**
   * Map used to keep track of regions we know of from the DiskInitFile
   * but that do not yet exist (they have not yet been recovered or they have been closed).
   */
  private final Map<Long, PlaceHolderDiskRegion> drMap =
      new HashMap<Long, PlaceHolderDiskRegion>();
  private final Map<String, PlaceHolderDiskRegion> drMapByName =
      new HashMap<String, PlaceHolderDiskRegion>();
  
  /**
   * Map of persistent partitioned regions configurations that are stored in this
   * init file.
   */
  private final Map<String, PRPersistentConfig> prMap =
      new HashMap<String, PRPersistentConfig>();

  private final InternalDataSerializer.RegistrationListener regListener;

  private int ifLiveRecordCount = 0;
  private int ifTotalRecordCount = 0;
  private boolean compactInProgress;
  // the recovered version
  private Version gfversion;
  
  
  /**
   * Used to calculate the highest oplog entry id we have seen
   * in a clear entry.
   */
  private long clearOplogEntryIdHWM = DiskStoreImpl.INVALID_ID;
  
  /**
   * Container for canonical ids held in the disk store. Member ids
   * are canonicalized so they can be written as an integer in the 
   * oplogs.
   */
  private final CanonicalIdHolder canonicalIdHolder = new CanonicalIdHolder();
  
  /**
   * Set of members that have been revoked. We keep track of the revoked
   * members so that we can indicate to the user that a member has been
   * revoked, rather than simply conflicting.
   */
  private final Set<PersistentMemberPattern> revokedMembers =
      new HashSet<PersistentMemberPattern>();
  
  /**
   * Lock used to synchronize access to the init file.
   * This is a lock rather than a synchronized block
   * because the backup tool needs to acquire this lock.
   */
  private final BackupLock lock = new BackupLock();

  transient private long nextSeekPosition;

  transient private boolean gotEOF;

  private void recoverFromFailedCompaction() {
    File tmpFile = getTempFile();
    if (tmpFile.exists()) {
      // if the temp init file exists then we must have crashed during a compaction.
      // In this case we need to destroy the non temp file and rename the temp file.
      if (this.ifFile.exists()) {
        if (!this.ifFile.delete()) {
          throw new IllegalStateException("Could not delete " + this.ifFile);
        }
        if (!tmpFile.renameTo(this.ifFile)) {
          throw new IllegalStateException("Could not rename " + tmpFile
                                          + " to " + this.ifFile);
        }
      }
    }
  }
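
  // How the rename protocol above stays safe (see compact() below): a
  // compaction first renames the live .if file to the temp file, then writes
  // a fresh .if file, then deletes the temp file. So if both files exist at
  // startup, the crash happened before the temp (pre-compaction) copy was
  // deleted; the new .if file may be incomplete, so we discard it and fall
  // back to the temp copy, which still holds every live record.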

  public DiskStoreImpl getDiskStore() {
    return this.parent;
  }

  public Version currentRecoveredGFVersion() {
    return this.gfversion;
  }
  
  DiskStoreID recover() {
    recoverFromFailedCompaction();
    if (!this.ifFile.exists()) {
      // nothing to recover
      // Instead of calling randomUUID which uses SecureRandom which can be slow
      // return UUID.randomUUID();
      // create a UUID using the cheaper Random class.
      // [sumedh] performance does not matter here, so stick with randomUUID()
      return new DiskStoreID(UUID.randomUUID());
    }
    DiskStoreID result = null;
    try {
      FileInputStream fis = null;
      CountingDataInputStream dis = null;
      try {
        fis = new FileInputStream(this.ifFile);
        dis = new CountingDataInputStream(new BufferedInputStream(fis, 8 * 1024), this.ifFile.length());
        DiskInitFileParser parser = new DiskInitFileParser(dis, this, getLogger());
        result = parser.parse();
        this.gotEOF = parser.gotEOF();
        this.nextSeekPosition = dis.getCount();
        if (DiskStoreImpl.TRACE_RECOVERY) {
          getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: liveRecordCount="
                           + this.ifLiveRecordCount
                           + " totalRecordCount="
                           + this.ifTotalRecordCount);
        }
      }
      finally {
        if (dis != null) {
          dis.close();
        }
        if (fis != null) {
          fis.close();
        }
      }
      for (PlaceHolderDiskRegion drv: this.drMap.values()) {
        if (drv.getMyPersistentID() != null
              || drv.getMyInitializingID() != null) {
          // Prepare each region we found in the init file for early recovery.
          if (drv.isBucket() || !getDiskStore().getOwnedByRegion()) {
//             getLogger().info(LocalizedStrings.DEBUG, "DEBUG preparing for early recovery " + drv);
            if (drv.isBucket() && !drv.getActualLruAlgorithm().isNone()) {
              drv.prlruStats = getDiskStore().getOrCreatePRLRUStats(drv);
            }
            getDiskStore().getStats().incUncreatedRecoveredRegions(1);
            drv.setRecoveredEntryMap(RegionMapFactory.createVM(drv,
                getDiskStore(), getDiskStore().getInternalRegionArguments()));
            if (!getDiskStore().isOffline()) {
              // schedule it for recovery since we want to recover region data early
              getDiskStore().scheduleForRecovery(drv);
            }
            // else if we are validating or offlineCompacting
            // then the scheduleForRecovery is called later in DiskStoreImpl
            // this helps fix bug 42043
          }
        }
      }
    }
    catch (EOFException ex) {
      // ignore since a partial record write can be caused by a crash
//       throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
//                                     .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
    }
    catch (ClassNotFoundException ex) {
      throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
          .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
    }
    catch (IOException ex) {
      throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
          .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
    }
    catch (CancelException ignore) {
      if (getLogger().fineEnabled()) {
        getLogger().fine("Oplog::readOplog:Error in recovery as Cache was closed",
            ignore);
      }
    }
    catch (RegionDestroyedException ignore) {
      if (getLogger().fineEnabled()) {
        getLogger().fine(
            "Oplog::readOplog:Error in recovery as Region was destroyed",
            ignore);
      }
    }
    catch (IllegalStateException ex) {
      if (!this.parent.isClosing()) {
        throw ex;
      }
    }
    return result;
  }
  
  public void cmnClearRegion(long drId, long clearOplogEntryId) {
    DiskRegionView drv = getDiskRegionById(drId);
//             getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile IFREC_CLEAR_REGION_ID drId=" + drId + " clearOplogEntryId=" + clearOplogEntryId);
    if (drv.getClearOplogEntryId() == DiskStoreImpl.INVALID_ID) {
      this.ifLiveRecordCount++;
    }
    // otherwise previous clear is cancelled so don't change liveRecordCount
    this.ifTotalRecordCount++;
    drv.setClearOplogEntryId(clearOplogEntryId);
    if(clearOplogEntryId > clearOplogEntryIdHWM) {
      clearOplogEntryIdHWM = clearOplogEntryId;
    }
  }
  
  public void cmnClearRegion(long drId, ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
    DiskRegionView drv = getDiskRegionById(drId);
//             getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile IFREC_CLEAR_REGION_ID drId=" + drId + " clearOplogEntryId=" + clearOplogEntryId);
    if (drv.getClearRVV() == null) {
      this.ifLiveRecordCount++;
    }
    // otherwise previous clear is cancelled so don't change liveRecordCount
    this.ifTotalRecordCount++;
    
    DiskStoreID ownerId = parent.getDiskStoreID();
    // Create a fake RVV for clear purposes. We only need the memberToVersion information.
    RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
    long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
    RegionVersionVector rvv = new DiskRegionVersionVector(ownerId,
        memberToVersion, ownerVersion, new ConcurrentHashMap(), 0L, false,
        ownerExceptions);
    drv.setClearRVV(rvv);
  }

  private int liveRegions = 0; // added for bug 41618

  public boolean hasLiveRegions() {
    lock.lock(false);
    try {
      return this.liveRegions > 0;
    } finally {
      lock.unlock();
    }
  }
      
  public void cmnCreateRegion(long drId, String regName) {
    recoverDiskRegion(drId, regName);
    this.liveRegions++;
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }

  public void cmnRegionConfig(long drId, byte lruAlgorithm, byte lruAction, int lruLimit,
                              int concurrencyLevel, int initialCapacity,
                              float loadFactor, boolean statisticsEnabled,
                              boolean isBucket, EnumSet<DiskRegionFlag> flags,
                              long uuid, String partitionName, int startingBucketId,
                              String compressorClassName, boolean enableOffHeapMemory) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
      GemFireCacheImpl.StaticSystemCallbacks sysCb =
          GemFireCacheImpl.FactoryStatics.systemCallbacks;
      //We need to add the IS_WITH_VERSIONING to persistent regions
      //during the upgrade. Previously, all regions had versioning enabled
      //but now only regions that have this flag will have versioning enabled.
      //
      // We don't want gateway queues to turn on versioning. Unfortunately, the only
      // way to identify that a region is a gateway queue is by the region
      // name.
      if(Version.GFE_75.compareTo(currentRecoveredGFVersion()) > 0
          && !dr.getName().contains("_SERIAL_GATEWAY_SENDER_QUEUE")
          && !dr.getName().contains("_PARALLEL_QUEUE_")
          && sysCb == null) {
        flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
      }
      dr.setConfig(lruAlgorithm, lruAction, lruLimit,
                   concurrencyLevel, initialCapacity, loadFactor,
                   statisticsEnabled, isBucket, flags, uuid,
                   partitionName, startingBucketId,
                   compressorClassName, enableOffHeapMemory);

      // Just count this as a live record even though it is possible
      // that we have an extra one due to the config changing while
      // we were offline.
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }
  
  public boolean cmnPRCreate(String name, PRPersistentConfig config) {
    if(this.prMap.put(name, config) == null) {
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
      this.liveRegions++;
      return true;
    }
    
    return false;
  }

  public void cmnGemfireVersion(Version version) {
    this.gfversion = version;
  }

  public boolean cmnPRDestroy(String name) {
    if(this.prMap.remove(name) != null) {
      this.ifLiveRecordCount--;
      this.ifTotalRecordCount++;
      this.liveRegions--;
      return true;
    }
    return false;
  }
  
  @Override
  public void cmnAddCanonicalMemberId(int id, Object object) {
    this.canonicalIdHolder.addMapping(id, object);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }

  public void cmnRmMemberId(long drId, PersistentMemberID pmid) {
    DiskRegionView dr = getDiskRegionById(drId);
//             if (dr == null) {
//               getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile no region for drId=" + drId);
//             }
    if (dr != null) {
      if (!dr.rmOnlineMember(pmid)) {
        if (!dr.rmOfflineMember(pmid)) {
          dr.rmEqualMember(pmid);
        }
      }
      // since we removed a member don't inc the live count.
      // In fact decrement it by one since both this record
      // and the previous one are garbage.
      this.ifLiveRecordCount--;
      this.ifTotalRecordCount++;
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }

  public void cmnOfflineMemberId(long drId, PersistentMemberID pmid) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
      dr.addOfflineMember(pmid);
      if (dr.rmOnlineMember(pmid) || dr.rmEqualMember(pmid)) {
        this.ifLiveRecordCount--;
      }
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }
  
  public void cmdOfflineAndEqualMemberId(long drId, PersistentMemberID pmid) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
      if (this.parent.isUpgradeVersionOnly(this) &&
          Version.GFE_70.compareTo(currentRecoveredGFVersion()) > 0) {
        dr.addOnlineMember(pmid);
        if (dr.rmOfflineMember(pmid)) {
          this.ifLiveRecordCount--;
        }
      } else {
        dr.addOfflineAndEqualMember(pmid);
        if (dr.rmOnlineMember(pmid) || dr.rmOfflineMember(pmid)) {
          this.ifLiveRecordCount--;
        }
      }
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }

  public void cmnOnlineMemberId(long drId, PersistentMemberID pmid) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
      dr.addOnlineMember(pmid);
      if (dr.rmOfflineMember(pmid) || dr.rmEqualMember(pmid)) {
        this.ifLiveRecordCount--;
      }
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }

  public void cmnDataSerializerId(Class dsc) {
    if (dsc != null) {
      DataSerializer ds = InternalDataSerializer.register(dsc, /*dsId,*/ true);
      this.dsIds.add(ds.getId());
    }
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }

  public void cmnInstantiatorId(int id, Class c, Class ic) {
    if (c != null && ic != null) {
      InternalInstantiator.register(c, ic, id, true);
      this.instIds.add(id);
    }
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnInstantiatorId(int id, String cn, String icn) {
    if (cn != null && icn != null) {
      InternalInstantiator.register(cn, icn, id, true);
      this.instIds.add(id);
    }
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnCrfCreate(long oplogId) {
    this.crfIds.add(oplogId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnDrfCreate(long oplogId) {
    this.drfIds.add(oplogId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnKrfCreate(long oplogId) {
    this.krfIds.add(oplogId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnIrfCreate(long oplogId) {
    this.irfIds.add(oplogId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnIrfDelete(long oplogId) {
    this.irfIds.remove(oplogId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnIndexCreate(String indexId) {
    this.createdIndexIds.add(indexId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public void cmnIndexDelete(String indexId) {
    this.deletedIndexIds.add(indexId);
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  }
  public boolean cmnCrfDelete(long oplogId) {
    if(this.krfIds.remove(oplogId)) {
      this.ifLiveRecordCount--;
      this.ifTotalRecordCount++;
    }
    if (this.crfIds.remove(oplogId)) {
      this.ifLiveRecordCount--;
      this.ifTotalRecordCount++;
      return true;
    } else {
      return false;
    }
  }
  public boolean cmnDrfDelete(long oplogId) {
    if (this.drfIds.remove(oplogId)) {
      this.ifLiveRecordCount--;
      this.ifTotalRecordCount++;
      return true;
    } else {
      return false;
    }
  }
  
  public boolean isCRFOplogIdPresent(long crfId) {
    return this.crfIds.contains(crfId);
  }
  
  public boolean isDRFOplogIdPresent(long drfId) {
    return this.drfIds.contains(drfId);
  }

  public void verifyOplogs(TLongHashSet foundCrfs, TLongHashSet foundDrfs) {
    verifyOplogs(foundCrfs, foundDrfs, this.crfIds, this.drfIds);
  }
  
  public void verifyOplogs(TLongHashSet foundCrfs, TLongHashSet foundDrfs, TLongHashSet expectedCrfIds, TLongHashSet expectedDrfIds) {
    TLongHashSet missingCrfs = calcMissing(foundCrfs, expectedCrfIds);
    TLongHashSet missingDrfs = calcMissing(foundDrfs, expectedDrfIds);
    // Note that finding extra ones is ok; it is possible we died just
    // after creating one but before we could record it in the if file
    // Or died just after deleting it but before we could record it in the if file.
    boolean failed = false;
    String msg = null;
    if (!missingCrfs.isEmpty()) {
      failed = true;
      msg = "*.crf files with these ids: "
        + Arrays.toString(missingCrfs.toArray());
    }
    if (!missingDrfs.isEmpty()) {
      failed = true;
      if (msg == null) {
        msg = "";
      } else {
        msg += ", ";
      }
      msg += "*.drf files with these ids: "
        + Arrays.toString(missingDrfs.toArray());
    }
    if (failed) {
      msg = "The following required files could not be found: " + msg + ".";
      throw new IllegalStateException(msg);
    }
  }
  
  private TLongHashSet calcMissing(TLongHashSet found, TLongHashSet expected) {
    TLongHashSet missing = new TLongHashSet(expected.toArray());
    missing.removeAll(found.toArray());
    return missing;
  }

  boolean hasKrf(long oplogId) {
    return krfIds.contains(oplogId);
  }

  boolean hasIrf(long oplogId) {
    return irfIds.contains(oplogId);
  }

  DiskRegionView takeDiskRegionByName(String name) {
    lock.lock(false);
    try {
      DiskRegionView result = this.drMapByName.remove(name);
      if (result != null) {
        //       getLogger().info(LocalizedStrings.DEBUG, "DEBUG: takeDiskRegionByName found=" + name, new RuntimeException("STACK"));
        this.drMap.remove(result.getId());
        //     } else {
        //       getLogger().info(LocalizedStrings.DEBUG, "DEBUG: takeDiskRegionByName DID NOT find=" + name, new RuntimeException("STACK"));
      }
      return result;
    } finally {
      lock.unlock();
    }
  }

  Map<Long, PlaceHolderDiskRegion> getDRMap() {
    lock.lock(false);
    try {
      return new HashMap<Long, PlaceHolderDiskRegion>(drMap);
    } finally {
      lock.unlock();
    }
  }

  DiskRegion createDiskRegion(DiskStoreImpl dsi, String name,
                                     boolean isBucket, boolean isPersistBackup,
                                     boolean overflowEnabled, boolean isSynchronous,
                                     DiskRegionStats stats, CancelCriterion cancel,
                                     DiskExceptionHandler exceptionHandler,
                                     RegionAttributes ra, EnumSet<DiskRegionFlag> flags,
                                     long uuid,
                                     String partitionName, int startingBucketId,
                                     Compressor compressor, boolean enableOffHeapMemory) {
    lock.lock(false);
    try {
      // need to call the constructor and addDiskRegion while synced
      DiskRegion result = new DiskRegion(dsi, name, isBucket, isPersistBackup,
          overflowEnabled, isSynchronous,
          stats, cancel, exceptionHandler, ra, flags, uuid,
          partitionName, startingBucketId,
          compressor == null ? null : compressor.getClass().getName(), enableOffHeapMemory);
      dsi.addDiskRegion(result);
      return result;
    } finally {
      lock.unlock();
    }
  }

  DiskRegionView getDiskRegionByName(String name) {
    lock.lock(false);
    try {
      return this.drMapByName.get(name);
    } finally {
      lock.unlock();
    }
  }

  DiskRegionView getDiskRegionByPrName(String name) {
    lock.lock(false);
    try {
      for (PlaceHolderDiskRegion dr: this.drMapByName.values()) {
        if (dr.isBucket()) {
          if (name.equals(dr.getPrName())) {
            return dr;
          }
        }
      }
      return null;
    } finally {
      lock.unlock();
    }
  }
  
  private DiskRegionView getDiskRegionById(Long drId) {
    DiskRegionView result = this.drMap.get(drId);
    if (result == null) {
      result = this.parent.getById(drId);
    }
    return result;
  }

  
  private void recoverDiskRegion(long drId, String regName) {
    // Whatever the last create region drId we see we remember
    // in the DiskStore. Note that this could be a region that is destroyed
    // (we will not know until we see a later destroy region record)
    // but since drId's can wrap around into negative numbers whatever
    // the last one we see is the right one to remember.
    this.parent.recoverRegionId(drId);
    PlaceHolderDiskRegion dr = new PlaceHolderDiskRegion(this.parent, drId, regName);
    Object old = this.drMap.put(drId, dr);
    assert old == null;
    PlaceHolderDiskRegion oldDr = this.drMapByName.put(regName, dr);
    if (oldDr != null) {
      this.drMap.remove(oldDr.getId()); // fix for bug 42043
    }
    // don't schedule for recovery until we know it was not destroyed
  }

  /**
   * Maximum number of bytes used to encode a DiskRegion id.
   */
  static final int DR_ID_MAX_BYTES = 9;

  private void writeIFRecord(byte b, DiskRegionView dr) {
    assert lock.isHeldByCurrentThread();
    try {
      ByteBuffer bb = getIFWriteBuffer(1+DR_ID_MAX_BYTES+1);
      bb.put(b);
      putDiskRegionID(bb, dr.getId());
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  private void writeIFRecord(byte b, DiskRegionView dr, long v) {
    assert lock.isHeldByCurrentThread();
    try {
      ByteBuffer bb = getIFWriteBuffer(1+DR_ID_MAX_BYTES+8+1);
      bb.put(b);
      putDiskRegionID(bb, dr.getId());
      bb.putLong(v);
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  private void writeIFRecord(byte b, long v) {
    assert lock.isHeldByCurrentThread();
    try {
      ByteBuffer bb = getIFWriteBuffer(OPLOG_FILE_ID_REC_SIZE);
      bb.put(b);
      bb.putLong(v);
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }

  private void writeIFRecord(byte b, DiskRegionView dr, String s) {
    assert lock.isHeldByCurrentThread();
    try {
      int hdosSize = 1+DR_ID_MAX_BYTES+estimateByteSize(s)+1;
      if (hdosSize < 32) {
        hdosSize = 32;
      }
      HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
      hdos.write(b);
      writeDiskRegionID(hdos, dr.getId());
      hdos.writeUTF(s);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, true);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }

  private void writeIFRecord(byte b, String s) {
    assert lock.isHeldByCurrentThread();
    try {
      int hdosSize = 1 + estimateByteSize(s) + 1;
      if (hdosSize < 32) {
        hdosSize = 32;
      }
      HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize,
          Version.CURRENT);
      hdos.write(b);
      hdos.writeUTF(s);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, true);
    } catch (IOException ex) {
      DiskAccessException dae = new DiskAccessException(
          LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0
              .toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }

  private void writeIFRecord(byte b, long regionId, String fileName, Object compactorInfo) {
    assert lock.isHeldByCurrentThread();
    try {
      int hdosSize = 1+DR_ID_MAX_BYTES+estimateByteSize(fileName)+1;
      if (hdosSize < 32) {
        hdosSize = 32;
      }
      HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
      hdos.write(b);
      writeDiskRegionID(hdos, regionId);
      hdos.writeUTF(fileName);
      // TODO - plumb the correct compactor info to this point, to optimize
      // serialization
      DataSerializer.writeObject(compactorInfo, hdos);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, true);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  
  private void writeIFRecord(byte b, long regionId, String fileName) {
    assert lock.isHeldByCurrentThread();
    try {
      int hdosSize = 1+DR_ID_MAX_BYTES+estimateByteSize(fileName)+1;
      if (hdosSize < 32) {
        hdosSize = 32;
      }
      HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
      hdos.write(b);
      writeDiskRegionID(hdos, regionId);
      hdos.writeUTF(fileName);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, true);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  
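  /**
   * Conservative upper bound on the bytes writeUTF needs for {@code s}:
   * modified UTF-8 uses at most three bytes per char, and the extra "+1"
   * char comfortably covers writeUTF's two-byte length prefix.
   */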
  private int estimateByteSize(String s) {
    return s == null ? 0 : ((s.length()+1)*3);
  }
  
  private void writePMIDRecord(byte opcode, DiskRegionView dr, PersistentMemberID pmid, boolean doStats) {
    assert lock.isHeldByCurrentThread();
    try {
      byte[] pmidBytes = pmidToBytes(pmid);
      ByteBuffer bb = getIFWriteBuffer(1+DR_ID_MAX_BYTES+4+pmidBytes.length+1);
      bb.put(opcode);
      putDiskRegionID(bb, dr.getId());
      bb.putInt(pmidBytes.length);
      bb.put(pmidBytes);
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb, doStats);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }

  private void putDiskRegionID(ByteBuffer bb, long drId) {
    // If the drId is <= 255 (max unsigned byte) then
    // encode it as a single byte.
    // Otherwise write a byte whose value is the number of bytes
    // it will be encoded by and then follow it with that many bytes.
    // Note that drIds are not allowed to have a value in the range 1..8 inclusive.
    if (drId >= 0 && drId <= 255) {
      bb.put((byte)drId);
    } else {
      byte bytesNeeded = (byte)Oplog.bytesNeeded(drId);
      bb.put(bytesNeeded);
      byte[] bytes = new byte[bytesNeeded];
      for (int i=bytesNeeded-1; i >=0; i--) {
        bytes[i] = (byte)(drId & 0xFF);
        drId >>=8;
      }
      bb.put(bytes);
    }
  }
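
  // Worked example of the encoding above (illustrative): drId 70000 (0x11170)
  // does not fit in one unsigned byte, so (assuming Oplog.bytesNeeded returns
  // the minimal count) the length byte 3 is written followed by 0x01 0x11 0x70.
  // A length byte can be at most 8 (a long), which plus the leading byte is
  // why DR_ID_MAX_BYTES is 9, and why drIds 1..8 are reserved: a first byte
  // in that range always means "length", never a region id.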

  static void writeDiskRegionID(DataOutput dos, long drId) throws IOException {
    // If the drId is <= 255 (max unsigned byte) then
    // encode it as a single byte.
    // Otherwise write a byte whose value is the number of bytes
    // it will be encoded by and then follow it with that many bytes.
    // Note that drIds are not allowed to have a value in the range 1..8 inclusive.
    if (drId >= 0 && drId <= 255) {
      dos.write((byte) drId);
    } else {
      byte bytesNeeded = (byte)Oplog.bytesNeeded(drId);
      dos.write(bytesNeeded);
      byte[] bytes = new byte[bytesNeeded];
      for (int i=bytesNeeded-1; i >=0; i--) {
        bytes[i] = (byte)(drId & 0xFF);
        drId >>=8;
      }
      dos.write(bytes);
    }
  }

  static long readDiskRegionID(DataInput dis) throws IOException {
    int bytesToRead = dis.readUnsignedByte();
    if (bytesToRead <= DiskStoreImpl.MAX_RESERVED_DRID
        && bytesToRead >= DiskStoreImpl.MIN_RESERVED_DRID) {
      long result = dis.readByte(); // we want to sign extend this first byte
      bytesToRead--;
      while (bytesToRead > 0) {
        result <<= 8;
        result |= dis.readUnsignedByte(); // no sign extension
        bytesToRead--;
      }
      return result;
    } else {
      return bytesToRead;
    }
  }
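
  // Read-side walk-through of the same example (illustrative, assuming the
  // reserved range is the 1..8 noted above): for bytes 3 0x01 0x11 0x70 the
  // first unsigned byte (3) is a length, so 0x01 is read with sign extension
  // and 0x11, 0x70 are shifted in, yielding 70000. A first byte of, say, 200
  // is outside the reserved range and is itself the drId.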
  
  private void cmnAddMyInitializingPMID(DiskRegionView dr, PersistentMemberID pmid) {
//     getLogger().info(LocalizedStrings.DEBUG, "DEBUG: AddMyInitializingPMID dr=" + dr);
    if (dr != null) {
      if (dr.addMyInitializingPMID(pmid) == null) {
        this.ifLiveRecordCount++;
      }
      this.ifTotalRecordCount++;
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }

  private void cmnMarkInitialized(DiskRegionView dr) {
//     getLogger().info(LocalizedStrings.DEBUG, "DEBUG: markInitialized dr=" + dr);
    // dec since this initializeId is overriding a previous one
    // It actually doesn't override myInitializing
    //this.ifLiveRecordCount--;
    // don't count this as a record in the totalRecCount
    if (dr != null) {
      dr.markInitialized();
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }
  private void cmnBeginDestroyRegion(DiskRegionView dr) {
    // don't count it; it is a small record

    if (dr != null) {
      dr.markBeginDestroyRegion();
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }
  private void cmnEndDestroyRegion(DiskRegionView dr) {
    // Figure out how many other records this freed up.

    if (dr != null) {
      if (dr.getClearOplogEntryId() != DiskStoreImpl.INVALID_ID) {
        // one for the clear record
        this.ifLiveRecordCount--;
      }
      // one for each online member
      this.ifLiveRecordCount -= dr.getOnlineMembers().size();
      // one for each offline member
      this.ifLiveRecordCount -= dr.getOfflineMembers().size();
      // one for each equal member
      this.ifLiveRecordCount -= dr.getOfflineAndEqualMembers().size();

      // one for the CREATE_REGION
      this.ifLiveRecordCount--;

      // one for the region's memberId
      if (dr.getMyPersistentID() != null) {
        this.ifLiveRecordCount--;
      }

      this.liveRegions--;
      this.drMap.remove(dr.getId());
      this.drMapByName.remove(dr.getName());
      this.parent.rmById(dr.getId());

      dr.markEndDestroyRegion();
    } else {
      if (DiskStoreImpl.TRACE_RECOVERY) {
        getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: bad disk region id!");
      } else {
        throw new IllegalStateException("bad disk region id");
      }
    }
  }
  private void cmnBeginPartialDestroyRegion(DiskRegionView dr) {
    // count the begin as both live and total
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  
    dr.markBeginDestroyDataStorage();
  }
  private void cmnEndPartialDestroyRegion(DiskRegionView dr) {
    // no need to count this small record

    // Figure out how many other records this freed up.
    if (dr.getClearOplogEntryId() != DiskStoreImpl.INVALID_ID) {
      // one for the clear record
      this.ifLiveRecordCount--;
    }
    if (dr.getMyPersistentID() != null) {
      // one for the region's memberId
      this.ifLiveRecordCount--;
    }

    dr.markEndDestroyDataStorage();
  }
  
  /**
   * Write the specified instantiator to the file.
   */
  private void saveInstantiator(Instantiator inst) {
    saveInstantiator(inst.getId(), inst.getClass().getName(), inst
        .getInstantiatedClass().getName());
  }

  private void saveInstantiator(int id, String instantiatorClassName,
      String instantiatedClassName) {
    lock.lock();
    try {
      if (!this.compactInProgress && this.instIds.contains(id)) {
        // instantiator already written to disk so just return
        return;
      }
      final byte[] classNameBytes = classNameToBytes(instantiatorClassName);
      final byte[] instClassNameBytes = classNameToBytes(instantiatedClassName);
      ByteBuffer bb = getIFWriteBuffer(1 + 4
                                       + 4 + classNameBytes.length
                                       + 4 + instClassNameBytes.length
                                       + 1);
      bb.put(IFREC_INSTANTIATOR_ID);
      bb.putInt(id);
      bb.putInt(classNameBytes.length);
      bb.put(classNameBytes);
      bb.putInt(instClassNameBytes.length);
      bb.put(instClassNameBytes);
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb);
    }
    catch (IOException ex) {
      throw new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_SAVING_INSTANTIATOR_TO_DISK_BECAUSE_0.toLocalizedString(ex), this.parent);
    } finally {
      lock.unlock();
    }
  }

  private void saveInstantiators() {
      Object[] objects = InternalInstantiator
          .getInstantiatorsForSerialization();
      for (Object obj : objects) {
        if (obj instanceof Instantiator) {
          saveInstantiator((Instantiator)obj);
        } else {
          InstantiatorAttributesHolder iah = (InstantiatorAttributesHolder)obj;
          saveInstantiator(iah.getId(), iah.getInstantiatorClassName(),
              iah.getInstantiatedClassName());
        }
      }
  }

  /**
   * Returns the bytes used to represent a class in an oplog.
   */
  static private byte[] classToBytes(Class c) {
    return classNameToBytes(c.getName());
  }

  /**
   * Returns the bytes used to represent a class in an oplog.
   */
  static private byte[] classNameToBytes(String cn) {
    return cn.getBytes(); // use default encoder
  }

  /**
   * Write the specified DataSerializer to the file.
   */
  private void saveDataSerializer(DataSerializer ds) {
    lock.lock();
    try {
      if (!this.compactInProgress && this.dsIds.contains(ds.getId())) {
        // dataSerializer already written to disk so just return
        return;
      }
      final byte[] classNameBytes = classToBytes(ds.getClass());
      ByteBuffer bb = getIFWriteBuffer(1 + 4 + classNameBytes.length + 1);
      bb.put(IFREC_DATA_SERIALIZER_ID);
      bb.putInt(classNameBytes.length);
      bb.put(classNameBytes);
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb);
    }
    catch (IOException ex) {
      throw new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_SAVING_DATA_SERIALIZER_TO_DISK_BECAUSE_0.toLocalizedString(ex), this.parent);
    } finally {
      lock.unlock();
    }
  }

  private void saveDataSerializers() {
    DataSerializer[] dataSerializers = InternalDataSerializer.getSerializers();
    for (int i = 0; i < dataSerializers.length; i++) {
      saveDataSerializer(dataSerializers[i]);
    }
  }

  private void saveGemfireVersion() {
    if (this.gfversion == null) {
      this.gfversion = Version.CURRENT;
    }
    writeGemfireVersion(this.gfversion);
  }

  private void stopListeningForDataSerializerChanges() {
    if (this.regListener != null) {
      InternalDataSerializer.removeRegistrationListener(this.regListener);
    }
  }

  private LogWriterI18n getLogger() {
    return this.parent.logger;
  }
  
  public long getMaxRecoveredClearEntryId() {
    return clearOplogEntryIdHWM;
  }
  
  private ByteBuffer getIFWriteBuffer(int size) {
    return ByteBuffer.allocate(size);
  }
  private void writeIFRecord(ByteBuffer bb) throws IOException {
    writeIFRecord(bb, true);
  }
  private void writeIFRecord(ByteBuffer bb, boolean doStats)
    throws IOException
  {
    assert lock.isHeldByCurrentThread();
    if (this.closed) {
      throw new DiskAccessException("The disk store is closed", parent);
    }

    this.ifRAF.write(bb.array(), 0, bb.position());
    if (DiskStoreImpl.TRACE_WRITES) {
      getLogger().info(LocalizedStrings.DEBUG, "TRACE_WRITES: DiskInitFile writeIFRecord bb[0] = "
          + bb.array()[0]
              //                        , new RuntimeException("STACK")
          );
    }
    if (doStats) {
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
    compactIfNeeded();
  }
  private void writeIFRecord(HeapDataOutputStream hdos, boolean doStats) throws IOException
  {
    assert lock.isHeldByCurrentThread();
    if (this.closed) {
      throw new DiskAccessException("The disk store is closed", parent);
    }
    hdos.sendTo(this.ifRAF);
    if (DiskStoreImpl.TRACE_WRITES) {
      getLogger().info(LocalizedStrings.DEBUG, "TRACE_WRITES: DiskInitFile writeIFRecord HDOS"
          //                        , new RuntimeException("STACK")
          );
    }
    if (doStats) {
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
    compactIfNeeded();
  }

  /**
   * If the file is smaller than this constant then it
   * does not need to be compacted.
   */
  private static final long MIN_SIZE_BEFORE_COMPACT = 1024 * 1024;
  /**
   * If the ratio of live vs. dead is not less than this constant
   * then no need to compact.
   */
  private static final double COMPACT_RATIO = 0.5;
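
  // Example of the trigger below: a 2 MB init file with 1000 total records,
  // 400 of them live, has a live ratio of 0.4; that is not greater than
  // COMPACT_RATIO and the file exceeds MIN_SIZE_BEFORE_COMPACT, so compact()
  // runs. With 600 live records the ratio is 0.6 and the file is left alone.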

  private void compactIfNeeded() {
    lock.lock(false);
    try {
      if (this.compactInProgress) return;
      if (this.ifTotalRecordCount == 0) return;
      if (this.ifTotalRecordCount == this.ifLiveRecordCount) return;
      if (this.ifRAF.length() <= MIN_SIZE_BEFORE_COMPACT) return;
      if ((double)this.ifLiveRecordCount / (double)this.ifTotalRecordCount > COMPACT_RATIO) return;
      compact();
    } catch (IOException ignore) {
      return;
    }  finally {
      lock.unlock();
    }
  }

  private File getTempFile() {
    return new File(this.ifFile.getAbsolutePath()+"tmp");
  }
  
  public File getIFFile() {
    return this.ifFile;
  }

  private void compact() {
    lock.lock(false);
    this.compactInProgress = true;
    try {
      try {
        this.ifRAF.close();
      } catch (IOException ignore) {
      }
      // rename the old file to tmpFile
      File tmpFile = getTempFile();
      if (this.ifFile.renameTo(tmpFile)) {
        boolean success = false;
        try {
          // create the new file
          openRAF();
          // fill the new file with data
          writeLiveData();
          success = true;
      
          // delete the old file
          if (!tmpFile.delete()) {
            throw new DiskAccessException("could not delete temporary file " + tmpFile, this.parent);
          }
        } catch (DiskAccessException ignore) {
          getLogger().warning(LocalizedStrings.DEBUG, "Exception compacting init file " + this, ignore);
        } finally {
          if(!success) {
            //if we failed
            // close the new one and delete it
            try {
              this.ifRAF.close();
            } catch (IOException ignore2) {
            }
            if (!this.ifFile.delete()) {
              throw new DiskAccessException("could not delete file " + this.ifFile, this.parent);
            }
            if(!tmpFile.renameTo(this.ifFile)) {
              throw new DiskAccessException("could not rename file " + tmpFile + " to " + this.ifFile, this.parent);
            }
            // reopen the old file since we couldn't write the new one
            openRAF();
            // reset the counts to 0 so we will try a compaction again
            // in the future but not right away.
            this.ifLiveRecordCount = 0;
            this.ifTotalRecordCount = 0;
          }
        }
      } else {
        // reopen the old file since we couldn't rename it
        openRAF();
        // reset the counts to 0 so we will try a compaction again
        // in the future but not right away.
        this.ifLiveRecordCount = 0;
        this.ifTotalRecordCount = 0;
      }
    } finally {
      this.compactInProgress = false;
      lock.unlock();
    }
  }
  
  public void copyTo(File targetDir) throws IOException {
    lock.lock(false);
    try {
      FileUtil.copy(this.ifFile, targetDir);
    } finally {
      lock.unlock();
    }
  }

  private void openRAF() {
    if (DiskStoreImpl.PREALLOCATE_IF) {
      openRAF2();
      return;
    }
    try {
      this.ifRAF = new RandomAccessFile(this.ifFile, getFileMode());
      long len = this.ifRAF.length();
      if (len != 0) {
        this.ifRAF.seek(len);
      }
    } catch (IOException ex) {
      throw new DiskAccessException(
          LocalizedStrings.DiskRegion_COULD_NOT_OPEN_0.toLocalizedString(this.ifFile
              .getPath()), ex, this.parent);
    }
  }

  protected String getFileMode() {
    return DiskStoreImpl.SYNC_IF_WRITES || parent.getSyncWrites() ? "rwd" : "rw";
  }
  
  private void openRAF2() {
    try {
      this.ifRAF = new RandomAccessFile(this.ifFile, getFileMode());
      long len = this.ifRAF.length();
      if (len != 0) {
        // this.ifRAF.seek(len);
        if (this.gotEOF) {
          this.ifRAF.seek(this.nextSeekPosition - 1);
        }
        else {
          this.ifRAF.seek(this.nextSeekPosition);
        }
      }
      else {
        // pre-allocate the if file at 1/200th (0.5%) of the max oplog
        // size, clamped to a minimum of 1MB and a maximum of 10MB
        long maxSizeInMB = Math.min(
            Math.max(this.parent.getMaxOplogSize() / 200L, 1L), 10L);
        byte[] buffer = new byte[1024 * 1024];
        for (int i = 0; i < maxSizeInMB; i++) {
          this.ifRAF.write(buffer);
        }
        this.ifRAF.seek(0L);
      }
    } catch (IOException ex) {
      throw new DiskAccessException(
          LocalizedStrings.DiskRegion_COULD_NOT_OPEN_0.toLocalizedString(this.ifFile
              .getPath()), ex, this.parent);
    }
  }

  /**
   * Write all live data to the init file
   */
  private void writeLiveData() {
    lock.lock(false);
    try {
      this.ifLiveRecordCount = 0;
      this.ifTotalRecordCount = 0;
      writeDiskStoreId();
      saveGemfireVersion();
      saveInstantiators();
      saveDataSerializers();
      saveCrfIds();
      saveDrfIds();
      saveKrfIds();
      saveIrfIds();
      for (DiskRegionView drv: this.drMap.values()) {
        writeLiveData(drv);
      }
      for (DiskRegionView drv: this.parent.getDiskRegions()) {
        writeLiveData(drv);
      }
      savePRConfigs();
      saveCanonicalIds();
      saveRevokedMembers();
      getLogger().info(LocalizedStrings.DEBUG, "After compacting init file lrc=" + this.ifLiveRecordCount + " trc=" + this.ifTotalRecordCount);
    } finally {
      lock.unlock();
    }
  }

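  // The save* methods below are used by compaction to rewrite one record
  // per live oplog file id (crf, drf, krf, irf), bumping both record
  // counters for each record written.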
  private void saveCrfIds() {
    for (TLongIterator i = this.crfIds.iterator(); i.hasNext(); ) {
      writeIFRecord(IFREC_CRF_CREATE, i.next());
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
  }

  private void saveDrfIds() {
    for (TLongIterator i = this.drfIds.iterator(); i.hasNext(); ) {
      writeIFRecord(IFREC_DRF_CREATE, i.next());
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
  }
  
  private void saveKrfIds() {
    for (TLongIterator i = this.krfIds.iterator(); i.hasNext(); ) {
      writeIFRecord(IFREC_KRF_CREATE, i.next());
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
  }

  private void saveIrfIds() {
    for (TLongIterator i = this.irfIds.iterator(); i.hasNext(); ) {
      writeIFRecord(IFREC_IRF_CREATE, i.next());
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
  }
  
  private void savePRConfigs() {
    for (Map.Entry<String, PRPersistentConfig> entry : prMap.entrySet()) {
      writePRCreate(entry.getKey(), entry.getValue());
      this.ifLiveRecordCount++;
      this.ifTotalRecordCount++;
    }
  }

  private void saveCanonicalIds() {
    IntObjectHashMap mappings = canonicalIdHolder.getAllMappings();
    mappings.forEachKeyValue(this::writeCanonicalId);
  }

  private void saveRevokedMembers() {
    for(PersistentMemberPattern revoked : revokedMembers) {
      writeRevokedMember(revoked);
    }
  }

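  /**
   * Writes the IFREC_DISKSTORE_ID record: the least significant bits of
   * the store's UUID, then the most significant bits, then the
   * end-of-record marker.
   */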
  private void writeDiskStoreId() {
    lock.lock();
    try {
      ByteBuffer bb = getIFWriteBuffer(1+8+8+1);
      bb.put(IFREC_DISKSTORE_ID);
      bb.putLong(parent.getDiskStoreID().getLeastSignificantBits());
      bb.putLong(parent.getDiskStoreID().getMostSignificantBits());
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    } finally {
      lock.unlock();
    }
  }
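
  /**
   * Writes an IFREC_REVOKE_DISK_STORE_ID record containing the serialized
   * pattern of the revoked member.
   */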
  private void writeRevokedMember(PersistentMemberPattern revoked) {
    try {
      HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT);
      hdos.write(IFREC_REVOKE_DISK_STORE_ID);
      InternalDataSerializer.invokeToData(revoked, hdos);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  
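  /**
   * Writes the full IFREC_REGION_CONFIG_ID_75 record for a region: its
   * eviction settings, map tuning parameters, flags, UUID, partition
   * information, compressor class and the off-heap/versioning booleans.
   */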
  private void writeRegionConfig(DiskRegionView drv) {
    try {
      int len = estimateByteSize(drv.getPartitionName());
      HeapDataOutputStream bb = new HeapDataOutputStream(1+DR_ID_MAX_BYTES
          +1+1+4+4+4+4+1+1+1+1+8+4+len+4+1+6+1+1, Version.CURRENT);
      bb.write(IFREC_REGION_CONFIG_ID_75);
      writeDiskRegionID(bb, drv.getId());
      bb.write(drv.getLruAlgorithm());
      bb.write(drv.getLruAction());
      bb.writeInt(drv.getLruLimit());
      bb.writeInt(drv.getConcurrencyLevel());
      bb.writeInt(drv.getInitialCapacity());
      bb.writeFloat(drv.getLoadFactor());
      bb.write((byte)(drv.getStatisticsEnabled()?1:0));
      bb.write((byte)(drv.isBucket()?1:0));
      final EnumSet<DiskRegionFlag> flags = drv.getFlags();
      bb.writeBoolean(flags.contains(DiskRegionFlag.HAS_REDUNDANT_COPY));
      bb.writeBoolean(flags.contains(DiskRegionFlag.DEFER_RECOVERY));
      bb.writeLong(drv.getUUID());
      bb.writeUTF(drv.getPartitionName());
      bb.writeInt(drv.getStartingBucketId());
      bb.writeBoolean(false); // griddb flag, preserve for backwards compatibility
      
      bb.writeUTF(drv.getCompressorClassName() == null ? "" : drv.getCompressorClassName());
      bb.writeBoolean(drv.getEnableOffHeapMemory());

      bb.writeBoolean(flags.contains(DiskRegionFlag.IS_WITH_VERSIONING));
      bb.write(END_OF_RECORD_ID);
      writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }

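  /**
   * Writes an IFREC_PR_CREATE record: the PR name, its total bucket count
   * and the name of the PR it is colocated with ("" when none).
   */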
  private void writePRCreate(String name, PRPersistentConfig config) {
    try {
      int nameLength = estimateByteSize(name);
      String colocatedWith = config.getColocatedWith();
      colocatedWith = colocatedWith == null ? "" : colocatedWith;
      int colocatedLength = estimateByteSize(colocatedWith);
      HeapDataOutputStream hdos = new HeapDataOutputStream(1+nameLength+4+colocatedLength+1, Version.CURRENT);
      hdos.write(IFREC_PR_CREATE);
      hdos.writeUTF(name);
      hdos.writeInt(config.getTotalNumBuckets());
      hdos.writeUTF(colocatedWith);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, false);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  
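  /**
   * Writes an IFREC_PR_DESTROY record for the named PR.
   */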
  private void writePRDestroy(String name) {
    try {
      int nameLength = estimateByteSize(name);
      HeapDataOutputStream hdos = new HeapDataOutputStream(1 + nameLength + 4 + 1, Version.CURRENT);
      hdos.write(IFREC_PR_DESTROY);
      hdos.writeUTF(name);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, false);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  
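  /**
   * Writes an IFREC_ADD_CANONICAL_MEMBER_ID record mapping the given id to
   * the serialized object. Unlike the other small records, this one is
   * counted in the record stats.
   */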
  private void writeCanonicalId(int id, Object object) {
    try {
      HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT);
      hdos.write(IFREC_ADD_CANONICAL_MEMBER_ID);
      hdos.writeInt(id);
      DataSerializer.writeObject(object, hdos);
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, true);
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }
  
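  /**
   * Writes every live record for a single region: its create and config
   * records, any pending destroy or clear state, and one record per known
   * persistent member.
   */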
  private void writeLiveData(DiskRegionView drv) {
//     if (DiskStoreImpl.TRACE_RECOVERY) {
//       getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: writeLiveData: IFREC_CREATE_REGION_ID: " + drv.getName());
//     }
    writeIFRecord(IFREC_CREATE_REGION_ID, drv, drv.getName());
    writeRegionConfig(drv);
    if (drv.wasAboutToDestroy()) {
      writeIFRecord(IFREC_BEGIN_DESTROY_REGION_ID, drv);
    } else if (drv.wasAboutToDestroyDataStorage()) {
      writeIFRecord(IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID, drv);
    }
    if (drv.getClearOplogEntryId() != DiskStoreImpl.INVALID_ID) {
      writeIFRecord(IFREC_CLEAR_REGION_ID, drv, drv.getClearOplogEntryId());
      this.ifTotalRecordCount++;
      this.ifLiveRecordCount++;
    }
    if (drv.getClearRVV() != null) {
      writeClearRecord(drv, drv.getClearRVV());
    }
    for (PersistentMemberID pmid: drv.getOnlineMembers()) {
      writePMIDRecord(IFREC_ONLINE_MEMBER_ID, drv, pmid, true);
    }
    for (PersistentMemberID pmid: drv.getOfflineMembers()) {
      writePMIDRecord(IFREC_OFFLINE_MEMBER_ID, drv, pmid, true);
    }
    for (PersistentMemberID pmid: drv.getOfflineAndEqualMembers()) {
      writePMIDRecord(IFREC_OFFLINE_AND_EQUAL_MEMBER_ID, drv, pmid, true);
    }
    if (drv.getMyPersistentID() != null) {
      writePMIDRecord(IFREC_MY_MEMBER_INITIALIZING_ID, drv, drv.getMyPersistentID(), true);
      writeIFRecord(IFREC_MY_MEMBER_INITIALIZED_ID, drv);
    }
    if (drv.getMyInitializingID() != null) {
      writePMIDRecord(IFREC_MY_MEMBER_INITIALIZING_ID, drv, drv.getMyInitializingID(), true);
    }
  }

  void forceCompaction() {
    compact();
  }

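  /**
   * Serializes a PersistentMemberID into the byte form stored in member
   * records; {@link #bytesToPMID} is the inverse.
   */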
  private byte[] pmidToBytes(PersistentMemberID id) {
    try {
      HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
      InternalDataSerializer.invokeToData(id, hdos);
      return hdos.toByteArray();
    } catch (IOException ex) {
      throw new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
    }
  }
  
  private PersistentMemberID bytesToPMID(byte[] bytes) {
    try {
      ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
      DataInputStream dis = new DataInputStream(bais);
      PersistentMemberID result = new PersistentMemberID();
      InternalDataSerializer.invokeFromData(result, dis);
      return result;
    } catch (IOException io) {
      throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
                                    .toLocalizedString(this.ifFile.getPath()),
                                    io, this.parent);
    } catch (ClassNotFoundException cnf) {
      throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
                                    .toLocalizedString(this.ifFile.getPath()),
                                    cnf, this.parent);
    }
  }

  // non-private methods 

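  /**
   * Creates or recovers the init file named {@code BACKUP<name>.if} in the
   * disk store's info directory. When shouldExist is true but the file is
   * missing, this fails with a message listing the surviving oplog files.
   * Otherwise existing records are recovered and the file is opened for
   * appending; unless the store is offline (and not offline-compacting),
   * a registration listener is installed so that instantiators and data
   * serializers registered later are persisted as they appear.
   */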
  DiskInitFile(String name, DiskStoreImpl parent, boolean shouldExist, Set<File> oplogs) {
    this.parent = parent;
    File f = new File(this.parent.getInfoFileDir().getDir(),
                      "BACKUP" + name + IF_FILE_EXT);
    final boolean didNotExist = !f.exists();
    if (shouldExist && didNotExist) {
      String msg = LocalizedStrings.DiskInitFile_THE_INIT_FILE_0_DOES_NOT_EXIST.toLocalizedString(new Object[] {f});
      if (!oplogs.isEmpty()) {
        Set<File> allOplogs = new LinkedHashSet<File>(oplogs);
        msg += LocalizedStrings.DiskInitFile_IF_IT_NO_LONGER_EXISTS_DELETE_FOLLOWING_FILES_TO_CREATE_THIS_DISK_STORE_EXISTING_OPLOGS_0.toLocalizedString(new Object[] {allOplogs});
      }
      throw new IllegalStateException(msg);
    }
    this.ifFile = f;
    this.dsIds = new TIntHashSet();
    this.instIds = new TIntHashSet();
    this.crfIds = new TLongHashSet();
    this.drfIds = new TLongHashSet();
    this.krfIds = new TLongHashSet();
    this.irfIds = new TLongHashSet();
    this.createdIndexIds = new HashSet<String>();
    this.deletedIndexIds = new HashSet<String>();
    recover();
    if (this.parent.isOffline() && !this.parent.isOfflineCompacting()) {
      dump();
    }
    openRAF();
    if (!this.parent.isOffline() || this.parent.isOfflineCompacting()) {
      if (didNotExist) {
        this.parent.setDiskStoreID(DiskStoreID.random());
        writeDiskStoreId();
        saveGemfireVersion(); // normal create diskstore
      }
      this.regListener = new InternalDataSerializer.RegistrationListener() {
          public void newInstantiator(Instantiator i) {
            saveInstantiator(i);
          }
          public void newDataSerializer(DataSerializer ds) {
            saveDataSerializer(ds);
          }
        };
      InternalDataSerializer.addRegistrationListener(this.regListener);
      // do this after the listener is registered to make sure we don't
      // miss any registrations.
      saveInstantiators();
      saveDataSerializers();
    } else {
      this.regListener = null;
    }
  }

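  /**
   * Replaces the live region with a PlaceHolderDiskRegion so that its
   * on-disk state remains known after the region itself is closed.
   */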
  void closeRegion(DiskRegionView dr) {
    lock.lock();
    try {
      this.parent.rmById(dr.getId()); // fix for bug 41334
      PlaceHolderDiskRegion phdr = new PlaceHolderDiskRegion(dr);
      this.drMap.put(dr.getId(), phdr);
      this.drMapByName.put(dr.getName(), phdr);
      // @todo make sure we only have one instance of the region for this name
    } finally {
      lock.unlock();
    }
  }

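  /**
   * Records a clear of the given region by oplog entry id. A previous
   * clear record, if any, becomes garbage for a future compaction.
   */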
  void clearRegion(DiskRegionView dr, long clearOplogEntryId) {
    lock.lock();
    try {
      //     getLogger().info(LocalizedStrings.DEBUG, "DEBUG: DiskInitFile writing IFREC_CLEAR_REGION_ID" + " clearOplogEntryId=" + clearOplogEntryId);
      if (clearOplogEntryId != DiskStoreImpl.INVALID_ID) {
        this.ifTotalRecordCount++;
        if (dr.getClearOplogEntryId() == DiskStoreImpl.INVALID_ID) {
          this.ifLiveRecordCount++;
        } else {
          // we now have one record to gc (the previous clear).
        }
        dr.setClearOplogEntryId(clearOplogEntryId);
        if (clearOplogEntryId > clearOplogEntryIdHWM) {
          clearOplogEntryIdHWM = clearOplogEntryId;
        }
        writeIFRecord(IFREC_CLEAR_REGION_ID, dr, clearOplogEntryId);
      }
    } finally {
      lock.unlock();
    } 
  }
  
  /**
   * Clear the region using an RVV.
   */
  void clearRegion(DiskRegion dr, RegionVersionVector rvv) {
    lock.lock();
    try {
      this.ifTotalRecordCount++;
      if (dr.getClearRVV() == null) {
        this.ifLiveRecordCount++;
      } else {
        // we now have one record to gc (the previous clear).
      }
      dr.setClearRVV(rvv);
      writeClearRecord(dr, rvv);
    } finally {
      lock.unlock();
    }
  }

  /**
   * Write a clear with an RVV record.
   */
  private void writeClearRecord(DiskRegionView dr,
      RegionVersionVector rvv) {
    try {
      HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT);
      hdos.write(IFREC_CLEAR_REGION_WITH_RVV_ID);
      writeDiskRegionID(hdos, dr.getId());
      // we only need the memberToVersion map for clear purposes
      Map memberToVersion = rvv.getMemberToVersion();
      hdos.writeInt(memberToVersion.size());
      for (Map.Entry entry : memberToVersion.entrySet()) {
        InternalDataSerializer.invokeToData(entry.getKey(), hdos);
        synchronized (entry.getValue()) {
          InternalDataSerializer.invokeToData(entry.getValue(), hdos);
        }
      }
      hdos.write(END_OF_RECORD_ID);
      writeIFRecord(hdos, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    }
  }

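  /**
   * Records the creation of a region. A brand new region gets both a
   * create record and a config record; a recreated region only has its
   * config rewritten, and only when it changed.
   */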
  void createRegion(DiskRegionView drv) {
    lock.lock();
    try {
      if (!drv.isRecreated()) {
        //       if (DiskStoreImpl.TRACE_RECOVERY) {
        //         getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: createRegion: IFREC_CREATE_REGION_ID: " + drv.getName(), new RuntimeException("STACK"));
        //       }
        writeIFRecord(IFREC_CREATE_REGION_ID, drv, drv.getName());
        this.liveRegions++;
        writeRegionConfig(drv);
        // no need to add to drMap since it will be in the DiskStore drMap
      } else {
        if (drv.hasConfigChanged()) {
          writeRegionConfig(drv);
          drv.setConfigChanged(false);
        }
      }
    } finally {
      lock.unlock();
    }
  }

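  // The begin/end methods below persist the two-phase destroy protocol for
  // a region: either a full destroy or a destroy of only its data storage.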
  void beginDestroyRegion(DiskRegionView dr) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        cmnBeginDestroyRegion(dr);
        writeIFRecord(IFREC_BEGIN_DESTROY_REGION_ID, dr);
      } 
    } finally {
      lock.unlock();
    }
  }
  void endDestroyRegion(DiskRegionView dr) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        cmnEndDestroyRegion(dr);
        writeIFRecord(IFREC_END_DESTROY_REGION_ID, dr);
        if (DiskStoreImpl.TRACE_WRITES) {
          getLogger().info(LocalizedStrings.DEBUG, "TRACE_WRITES: DiskInitFile IFREC_END_DESTROY_REGION_ID drId="
              + dr.getId()
              //                          , new RuntimeException("STACK")
          );
        }
      }
    } finally {
      lock.unlock();
    }
//     getLogger().info(LocalizedStrings.DEBUG, "DEBUG endDestroyRegion drId="
//                      + dr.getId()
//                      + " drMap=" + this.drMap.get(dr.getId())
//                      + " parentMap=" + this.parent.getById(dr.getId())
//                      );
  }
  void beginDestroyDataStorage(DiskRegionView dr) {
    lock.lock();
    try {
      assert regionStillCreated(dr);
      cmnBeginPartialDestroyRegion(dr);
      writeIFRecord(IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID, dr);
    } finally {
      lock.unlock();
    }
  }
  void endDestroyDataStorage(DiskRegionView dr) {
    lock.lock();
    try {
      assert regionStillCreated(dr);
      cmnEndPartialDestroyRegion(dr);
      writeIFRecord(IFREC_END_PARTIAL_DESTROY_REGION_ID, dr);
    } finally {
      lock.unlock();
    }
  }
  public void createPersistentPR(String name, PRPersistentConfig config) {
    lock.lock();
    try {
      if (cmnPRCreate(name, config)) {
        writePRCreate(name, config);
      }
    } finally {
      lock.unlock();
    }
  }
  public void destroyPersistentPR(String name) {
    lock.lock();
    try {
      if (cmnPRDestroy(name)) {
        writePRDestroy(name);
      }
    } finally {
      lock.unlock();
    }
  }
  public PRPersistentConfig getPersistentPR(String name) {
    lock.lock(false);
    try {
      return prMap.get(name);
    } finally {
      lock.unlock();
    }
  }
  public Map<String, PRPersistentConfig> getAllPRs() {
    lock.lock(false);
    try {
      return new HashMap<String, PRPersistentConfig>(prMap);
    } finally {
      lock.unlock();
    }
  }

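  // Each of the following methods records an oplog or index lifecycle
  // event (create/delete of a crf, drf, krf, irf or index) by updating the
  // in-memory state through the matching cmn* method and appending one
  // record to the init file.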
  void crfCreate(long oplogId) {
    lock.lock(false);
    try {
      cmnCrfCreate(oplogId);
      writeIFRecord(IFREC_CRF_CREATE, oplogId);
    } finally {
      lock.unlock();
    }
  }
  void drfCreate(long oplogId) {
    lock.lock(false);
    try {
      cmnDrfCreate(oplogId);
      writeIFRecord(IFREC_DRF_CREATE, oplogId);
    } finally {
      lock.unlock();
    }
  }
  void krfCreate(long oplogId) {
    lock.lock(false);
    try {
      cmnKrfCreate(oplogId);
      writeIFRecord(IFREC_KRF_CREATE, oplogId);
    } finally {
      lock.unlock();
    }
  }
  void irfCreate(long oplogId) {
    lock.lock(false);
    try {
      cmnIrfCreate(oplogId);
      writeIFRecord(IFREC_IRF_CREATE, oplogId);
    } finally {
      lock.unlock();
    }
  }
  void irfDelete(long oplogId) {
    lock.lock(false);
    try {
      cmnIrfDelete(oplogId);
      writeIFRecord(IFREC_IRF_DELETE, oplogId);
    } finally {
      lock.unlock();
    }
  }
  void indexCreate(String indexId) {
    lock.lock(false);
    try {
      cmnIndexCreate(indexId);
      writeIFRecord(IFREC_INDEX_CREATE, indexId);
    } finally {
      lock.unlock();
    }
  }
  void indexDelete(String indexId) {
    lock.lock(false);
    try {
      cmnIndexDelete(indexId);
      writeIFRecord(IFREC_INDEX_DELETE, indexId);
    } finally {
      lock.unlock();
    }
  }
  void crfDelete(long oplogId) {
    lock.lock(false);
    try {
      if (cmnCrfDelete(oplogId)) {
        // call writeIFRecord AFTER cmnCrfDelete to fix bug 41505
        writeIFRecord(IFREC_CRF_DELETE, oplogId);
      }
    } finally {
      lock.unlock();
    }
  }
  void drfDelete(long oplogId) {
    lock.lock(false);
    try {
      if (cmnDrfDelete(oplogId)) {
        writeIFRecord(IFREC_DRF_DELETE, oplogId);
      }
    } finally {
      lock.unlock();
    }
  }
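
  /**
   * Returns the canonical id for the given object, assigning and
   * persisting a new id on first use. For example (illustrative only):
   * <pre>
   *   int id = initFile.getOrCreateCanonicalId(memberId);
   *   Object same = initFile.getCanonicalObject(id); // yields memberId
   * </pre>
   */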
  int getOrCreateCanonicalId(Object object) {
    lock.lock(false);
    try {
      int id = canonicalIdHolder.getId(object);
      if (id <= 0) {
        id = canonicalIdHolder.createId(object);
        writeCanonicalId(id, object);
      }
      return id;
    } finally {
      lock.unlock();
    }
  }
  Object getCanonicalObject(int id) {
    lock.lock(false);
    try {
      return canonicalIdHolder.getObject(id);
    } finally {
      lock.unlock();
    }
  }
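
  /**
   * Closes the init file and deregisters the serializer listener. When no
   * live regions remain, the store is not merely being validated and this
   * member is not a pure accessor, the file itself is deleted.
   */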
  void close() {
    lock.lock();
    try {
      if (this.closed) return;
      this.closed = true;
      stopListeningForDataSerializerChanges();
      try {
        this.ifRAF.close();
      } catch (IOException ignore) {
      }
      if (this.liveRegions == 0 && !parent.isValidating()) {
        GemFireCacheImpl.StaticSystemCallbacks ssc = this.parent.getCache().getInternalProductCallbacks();
        if (ssc != null && !ssc.isAccessor()) {
          basicDestroy();
        }
      }
    } finally {
      lock.unlock();
    }
  }

  void destroy() {
    lock.lock();
    try {
      close();
      basicDestroy();
    } finally {
      lock.unlock();
    }
  }

  private void basicDestroy() {
    if (this.ifFile.exists()) {
//       getLogger().info(LocalizedStrings.DEBUG, "DEBUG destroying ifFile=" + this.ifFile, new RuntimeException("STACK"));
      if (!this.ifFile.delete()) {
        getLogger().info(LocalizedStrings.DEBUG, "could not delete file " + this.ifFile);
      }
    }
  }
  

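  // The PMID methods below keep the per-region persistent membership views
  // (online, offline, offline-and-equal) in sync on disk, adjusting the
  // live record count whenever a member moves from one view to another.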
  void addMyInitializingPMID(DiskRegionView dr, PersistentMemberID pmid) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
  //       getLogger().info(LocalizedStrings.DEBUG, "DEBUG addMyInitializingPMID drId="
  //                   + dr.getId()
  //                   + " drMap=" + this.drMap.get(dr.getId())
  //                   + " parentMap=" + this.parent.getById(dr.getId())
  //                   );
        cmnAddMyInitializingPMID(dr, pmid);
        writePMIDRecord(IFREC_MY_MEMBER_INITIALIZING_ID, dr, pmid, false);
      }
    } finally {
      lock.unlock();
    }
  }
  void markInitialized(DiskRegionView dr) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        writeIFRecord(IFREC_MY_MEMBER_INITIALIZED_ID, dr);
        cmnMarkInitialized(dr);
      }
    } finally {
      lock.unlock();
    }
  }
  void addOnlinePMID(DiskRegionView dr, PersistentMemberID pmid) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        if (dr.addOnlineMember(pmid)) {
          if (dr.rmOfflineMember(pmid) || dr.rmEqualMember(pmid)) {
            this.ifLiveRecordCount--;
          }
          writePMIDRecord(IFREC_ONLINE_MEMBER_ID, dr, pmid, true);
        }
      }
    } finally {
      lock.unlock();
    }
  }
  void addOfflinePMID(DiskRegionView dr, PersistentMemberID pmid) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        if (dr.addOfflineMember(pmid)) {
          if (dr.rmOnlineMember(pmid) || dr.rmEqualMember(pmid)) { 
            this.ifLiveRecordCount--;
          }
          writePMIDRecord(IFREC_OFFLINE_MEMBER_ID, dr, pmid, true);
        }
      }
    } finally {
      lock.unlock();
    }
  }
  void addOfflineAndEqualPMID(DiskRegionView dr,
        PersistentMemberID pmid) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        if (dr.addOfflineAndEqualMember(pmid)) {
          if (dr.rmOnlineMember(pmid) || dr.rmOfflineMember(pmid)) { 
            this.ifLiveRecordCount--;
          }
          writePMIDRecord(IFREC_OFFLINE_AND_EQUAL_MEMBER_ID, dr, pmid, true);
        }
      }
    } finally {
      lock.unlock();
    }
  }
  void rmPMID(DiskRegionView dr, PersistentMemberID pmid) {
    lock.lock();
    try {
      if (regionStillCreated(dr)) {
        if (dr.rmOnlineMember(pmid)
            || dr.rmOfflineMember(pmid)
            || dr.rmEqualMember(pmid)) {
          // we now have two records to gc (this one and the live one we removed).
          this.ifLiveRecordCount--;
          this.ifTotalRecordCount++;
          writePMIDRecord(IFREC_RM_MEMBER_ID, dr, pmid, false);
        }
      }
    } finally {
      lock.unlock();
    }
  }
  
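  /**
   * Persists the revocation of a member identified by UUID; patterns
   * without a UUID (the legacy revoke API) are not recorded.
   */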
  public void revokeMember(PersistentMemberPattern revokedPattern) {
    // we only record members revoked with the new API,
    // which identifies them by UUID
    if (revokedPattern.getUUID() == null) {
      return;
    }
    
    lock.lock();
    try {
      if (cmnRevokeDiskStoreId(revokedPattern)) {
        // this revocation adds a new live record that must survive compaction
        this.ifLiveRecordCount++;
        this.ifTotalRecordCount++;
        writeRevokedMember(revokedPattern);
      }
    } finally {
      lock.unlock();
    }
  }
  
  /**
   * Get the set of members known to be revoked
   */
  public Set<PersistentMemberPattern> getRevokedIDs() {
    lock.lock(false);
    try {
      // return a copy of the set, because we modify it in place
      return new HashSet<PersistentMemberPattern>(this.revokedMembers);
    } finally {
      lock.unlock();
    }
  }

  /**
   * Return true if the given dr is still created in this IF.
   */
  boolean regionStillCreated(DiskRegionView dr) {
    lock.lock(false);
    try {
      return getDiskRegionById(dr.getId()) != null;
    } finally {
      lock.unlock();
    }
  }

  boolean regionExists(long drId) {
    lock.lock(false);
    try {
      // @todo make drMap concurrent so this call can be fast
      return this.drMap.containsKey(drId);
    } finally {
      lock.unlock();
    }
  }

  /**
   * Additional flags for a disk region that are persisted in its meta-data.
   * Currently only a few used by GemFireXD are defined here, but the other
   * boolean flags would also be better moved here.
   *
   * @author swale
   * @since 7.0
   */
  public enum DiskRegionFlag {
    /**
     * True if this is a disk region for a PR bucket having redundant copies > 0.
     * It changes the type of the underlying RegionEntry created in GemFireXD.
     */
    HAS_REDUNDANT_COPY,
    /**
     * True if recovery of values for this region should be done at the end
     * after all other disk regions. Used by GemFireXD for Gateway queues, for
     * example, that contain keys referring to all other regions and need the
     * table meta-data to be ready before the keys can be initialized.
     */
    DEFER_RECOVERY,
    /**
     * True if this disk region has entries with versioning enabled. Depending
     * on this flag, the appropriate RegionEntryFactory gets instantiated.
     */
    IS_WITH_VERSIONING
  }

  Collection<DiskRegionView> getKnown() {
    lock.lock(false);
    try {
      return new ArrayList<DiskRegionView>(this.drMap.values());
    } finally {
      lock.unlock();
    }
  }
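
  // DiskInitFileInterpreter callbacks: recovery records identify regions
  // by id, so the overloads below resolve the id and delegate to the
  // DiskRegionView variants.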

  public void cmnAddMyInitializingPMID(long drId, PersistentMemberID pmid) {
    cmnAddMyInitializingPMID(getDiskRegionById(drId), pmid);
  }

  public void cmnBeginDestroyRegion(long drId) {
    cmnBeginDestroyRegion(getDiskRegionById(drId));
  }

  public void cmnBeginPartialDestroyRegion(long drId) {
    cmnBeginPartialDestroyRegion(getDiskRegionById(drId));
  }

  public void cmnEndDestroyRegion(long drId) {
    cmnEndDestroyRegion(getDiskRegionById(drId));
  }

  public void cmnEndPartialDestroyRegion(long drId) {
    cmnEndPartialDestroyRegion(getDiskRegionById(drId));
  }

  public void cmnMarkInitialized(long drId) {
    cmnMarkInitialized(getDiskRegionById(drId));
  }
  
  @Override
  public void cmnDiskStoreID(DiskStoreID diskStoreID) {
    if (DiskStoreImpl.TRACE_RECOVERY) {
      getLogger().info(LocalizedStrings.DEBUG, "TRACE_RECOVERY: diskStoreId="
                       + diskStoreID);
    }
    this.parent.setDiskStoreID(diskStoreID);
  }
  
  public boolean cmnRevokeDiskStoreId(PersistentMemberPattern revokedPattern) {
    return this.revokedMembers.add(revokedPattern);
  }
  
  public String getNameForError() {
    return this.parent.toString();
  }

  public boolean isClosing() {
    return parent.isClosing();
  }

  public void dump() {
    if (DiskStoreImpl.TRACE_RECOVERY) {
      System.out.println("expectedCrfs=" + Arrays.toString(this.crfIds.toArray()));
      System.out.println("expectedDrfs=" + Arrays.toString(this.drfIds.toArray()));
      System.out.println("dataSerializerIds=" + Arrays.toString(this.dsIds.toArray()));
      System.out.println("instantiatorIds=  " + Arrays.toString(this.instIds.toArray()));
    }
  }
  
  /**
   * Returns a map of region_name->(pr_buckets|replicated_region).
   * @param regName the region to dump, or null to dump all regions
   */
  private Map<String, List<PlaceHolderDiskRegion>> getRegionsToDump(String regName) {
    if (regName == null) {
      Map<String, List<PlaceHolderDiskRegion>> regions = new HashMap<String, List<PlaceHolderDiskRegion>>();
      for (DiskRegionView drv: this.drMap.values()) {
        if (drv instanceof PlaceHolderDiskRegion) {
          PlaceHolderDiskRegion dr = (PlaceHolderDiskRegion)drv;
          if (dr.isBucket()) {
            List<PlaceHolderDiskRegion> buckets = regions.get(dr.getPrName());
            if (buckets == null) {
              buckets = new ArrayList<PlaceHolderDiskRegion>();
              regions.put(dr.getPrName(), buckets);
            }
            buckets.add(dr);
          } else {
            regions.put(drv.getName(), Collections.singletonList(dr));
          }
        }
      }
      return regions;
    } else {
      DiskRegionView drv = getDiskRegionByName(regName);
      if (drv == null) {
        List<PlaceHolderDiskRegion> buckets = new ArrayList<PlaceHolderDiskRegion>();
        for (PlaceHolderDiskRegion dr: this.drMapByName.values()) {
          if (dr.isBucket()) {
            if (dr.getName().equals(dr.getPrName())) {
              buckets.add(dr);
            }
          }
        }
        if (buckets.isEmpty()) {
          throw new IllegalArgumentException("The disk store does not contain a region named " + regName);  
        } else {
          return Collections.singletonMap(regName, buckets);
        }
      } else if (drv instanceof PlaceHolderDiskRegion) {
        return Collections.singletonMap(regName, Collections.singletonList((PlaceHolderDiskRegion) drv));
      } else {
        return Collections.emptyMap();
      }
    }
  }
  
  public void dumpRegionInfo(PrintStream printStream, String regName) {
    printStream.println("Regions in the disk store:");
    for (Map.Entry<String, List<PlaceHolderDiskRegion>> regionEntry : getRegionsToDump(regName).entrySet()) {
      printStream.print("  ");
      List<PlaceHolderDiskRegion> regions = regionEntry.getValue();
      if (DiskStoreImpl.TRACE_RECOVERY) {
        for (PlaceHolderDiskRegion region : regions) {
          region.dump(printStream);
        }
      } else {
        // NOTE: regions will always have at least one item
        regions.get(0).dump(printStream);
      }
    }
  }
  
  public void dumpRegionMetadata(boolean showBuckets) {
    System.out.println("Disk Store ID: " + getDiskStore().getDiskStoreID());
    System.out.println("Regions in the disk store:");
    for (Map.Entry<String, List<PlaceHolderDiskRegion>> regionEntry : getRegionsToDump(null).entrySet()) {
      System.out.print("  ");
      List<PlaceHolderDiskRegion> regions = regionEntry.getValue();
      PlaceHolderDiskRegion region0 = regions.get(0);
      if (region0.isBucket()) {
        dumpPRMetaData(showBuckets, regions);
      } else {
        region0.dumpMetadata();
      }
    }
  }

  /**
   * Dump the metadata for a partitioned region, optionally dumping the meta
   * data for individual buckets. 
   */
  private void dumpPRMetaData(boolean showBuckets, List<PlaceHolderDiskRegion> regions) {
    StringBuilder msg = new StringBuilder(regions.get(0).getPrName());
    regions.get(0).dumpCommonAttributes(msg);
    
    if (showBuckets) {
      for (PlaceHolderDiskRegion region : regions) {
        msg.append("\n");
        msg.append("\n");
        msg.append(region.getName());
        region.dumpPersistentView(msg);
      }
    } else {
      Map<DiskStoreID, String> online = new HashMap<DiskStoreID, String>();
      Map<DiskStoreID, String> offline = new HashMap<DiskStoreID, String>();
      Map<DiskStoreID, String> equal = new HashMap<DiskStoreID, String>();
      for (PlaceHolderDiskRegion region : regions) {
        for (PersistentMemberID mem : region.getOnlineMembers()) {
          online.put(mem.diskStoreId, mem.host + ":" + mem.directory);
        }
        for (PersistentMemberID mem : region.getOfflineMembers()) {
          offline.put(mem.diskStoreId, mem.host + ":" + mem.directory);
        }
        for (PersistentMemberID mem : region.getOfflineAndEqualMembers()) {
          equal.put(mem.diskStoreId, mem.host + ":" + mem.directory);
        }
      }
      
      msg.append("\n\tonlineMembers:");
      for (Map.Entry<DiskStoreID, String> id : online.entrySet()) {
        msg.append("\n\t\t").append(id.getKey()).append(" ").append(id.getValue());
      }

      msg.append("\n\tofflineMembers:");
      for (Map.Entry<DiskStoreID, String> id : offline.entrySet()) {
        msg.append("\n\t\t").append(id.getKey()).append(" ").append(id.getValue());
      }

      msg.append("\n\tequalsMembers:");
      for (Map.Entry<DiskStoreID, String> id : equal.entrySet()) {
        msg.append("\n\t\t").append(id.getKey()).append(" ").append(id.getValue());
      }
    }
    
    System.out.println(msg);
  }

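  /**
   * Destroys every bucket of the named PR held by this disk store and then
   * removes the PR's own config record. The buckets are collected while
   * holding the lock but destroyed after releasing it.
   */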
  public void destroyPRRegion(String prName) {
    ArrayList<PlaceHolderDiskRegion> buckets = new ArrayList<PlaceHolderDiskRegion>();
    lock.lock();
    try {
      for (PlaceHolderDiskRegion dr: this.drMapByName.values()) {
        if (dr.isBucket()) {
          if (prName.equals(dr.getPrName())) {
            buckets.add(dr);
          }
        }
      }
    } finally {
      lock.unlock();
    }
    for (PlaceHolderDiskRegion dr: buckets) {
      endDestroyRegion(dr);
    }
    
    //Remove the partitioned region record
    //for this disk store.
    destroyPersistentPR(prName);
  }
                             
  public String modifyPRRegion(String prName,
                             String lruOption,
                             String lruActionOption,
                             String lruLimitOption,
                             String concurrencyLevelOption,
                             String initialCapacityOption,
                             String loadFactorOption,
                             String compressorClassNameOption,
                             String statisticsEnabledOption,
                             boolean printToConsole) {
    StringBuffer sb = new StringBuffer();
    ArrayList<PlaceHolderDiskRegion> buckets = new ArrayList<PlaceHolderDiskRegion>();
    lock.lock();
    try {
      for (PlaceHolderDiskRegion dr: this.drMapByName.values()) {
        if (dr.isBucket()) {
          if (prName.equals(dr.getPrName())) {
            buckets.add(dr);
          }
        }
      }
      
      // only print info on the first bucket to fix bug 41735
      boolean printInfo = true;
      for (PlaceHolderDiskRegion dr: buckets) {
        String message = basicModifyRegion(printInfo, dr, lruOption, lruActionOption, lruLimitOption,
            concurrencyLevelOption, initialCapacityOption, loadFactorOption,
            compressorClassNameOption, statisticsEnabledOption, printToConsole);
        if (printInfo)
          sb.append(message);
        printInfo = false;
      }
    } finally {
      lock.unlock();
    }
    return sb.toString();
  }
  public String modifyRegion(DiskRegionView drv,
                           String lruOption,
                           String lruActionOption,
                           String lruLimitOption,
                           String concurrencyLevelOption,
                           String initialCapacityOption,
                           String loadFactorOption,
                           String compressorClassNameOption,
                           String statisticsEnabledOption,
                           boolean printToConsole) {
    lock.lock();
    try {
      return basicModifyRegion(false, drv, lruOption, lruActionOption, lruLimitOption,
          concurrencyLevelOption, initialCapacityOption,
          loadFactorOption, compressorClassNameOption,
          statisticsEnabledOption, printToConsole);
    } finally {
      lock.unlock();
    }
  }
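
  /**
   * Validates the supplied option strings, applies them to the region's
   * persisted configuration and rewrites its config record. Returns a
   * before/after dump of the attributes, optionally echoing it to stdout.
   */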
  private String basicModifyRegion(boolean printInfo, DiskRegionView drv,
                                 String lruOption,
                                 String lruActionOption,
                                 String lruLimitOption,
                                 String concurrencyLevelOption,
                                 String initialCapacityOption,
                                 String loadFactorOption,
                                 String compressorClassNameOption,
                                 String statisticsEnabledOption,
                                 boolean printToConsole) {
    byte lruAlgorithm = drv.getLruAlgorithm();
    byte lruAction = drv.getLruAction();
    int lruLimit = drv.getLruLimit();
    int concurrencyLevel = drv.getConcurrencyLevel();
    int initialCapacity = drv.getInitialCapacity();
    float loadFactor = drv.getLoadFactor();
    String compressorClassName = drv.getCompressorClassName();
    boolean statisticsEnabled = drv.getStatisticsEnabled();
    StringBuffer sb = new StringBuffer();
    final String lineSeparator = System.getProperty("line.separator");
    
    if (lruOption != null) {
      EvictionAlgorithm ea = EvictionAlgorithm.parseAction(lruOption);
      if (ea != null) {
        lruAlgorithm = (byte)ea.getValue();
      } else {
        throw new IllegalArgumentException("Expected lru to be one of the following: \"none\", \"lru-entry-count\", \"lru-heap-percentage\", or \"lru-memory-size\"");
      }
      if (ea.isNone()) {
        lruAction = (byte)EvictionAction.NONE.getValue();
        lruLimit = 0;
      } else if (ea.isLRUHeap()) {
        lruLimit = 0;
      }
    }
    if (lruActionOption != null) {
      EvictionAction ea = EvictionAction.parseAction(lruActionOption);
      if (ea != null) {
        lruAction = (byte)ea.getValue();
      } else {
        throw new IllegalArgumentException("Expected lruAction to be one of the following: \"none\", \"overflow-to-disk\", or \"local-destroy\"");
      }
    }
    if (lruLimitOption != null) {
      lruLimit = Integer.parseInt(lruLimitOption);
      if (lruLimit < 0) {
        throw new IllegalArgumentException("Expected lruLimit to be greater than or equal to zero");
      }
    }
    if (concurrencyLevelOption != null) {
      concurrencyLevel = Integer.parseInt(concurrencyLevelOption);
      if (concurrencyLevel < 0) {
        throw new IllegalArgumentException("Expected concurrencyLevel to be greater than or equal to zero");
      }
    }
    if (initialCapacityOption != null) {
      initialCapacity = Integer.parseInt(initialCapacityOption);
      if (initialCapacity < 0) {
        throw new IllegalArgumentException("Expected initialCapacity to be greater than or equal to zero");
      }
    }
    if (loadFactorOption != null) {
      loadFactor = Float.parseFloat(loadFactorOption);
      if (loadFactor < 0.0) {
        throw new IllegalArgumentException("Expected loadFactor to be greater than or equal to zero");
      }
    }
    if (compressorClassNameOption != null) {
      compressorClassName = (compressorClassNameOption.equals("") ? null : compressorClassNameOption);
    }
    if (statisticsEnabledOption != null) {
      statisticsEnabled = Boolean.parseBoolean(statisticsEnabledOption);
      if (!statisticsEnabled) {
        // make sure it is "false"
        if (!statisticsEnabledOption.equalsIgnoreCase("false")) {
          throw new IllegalArgumentException("Expected statisticsEnabled to be \"true\" or \"false\"");
        }
      }
    }
    
    sb.append("Before modification: ");
    sb.append(lineSeparator);
    sb.append(((PlaceHolderDiskRegion)drv).dump2());
    sb.append(lineSeparator);
    
    drv.setConfig(lruAlgorithm, lruAction, lruLimit,
                  concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled,
                  drv.isBucket(), drv.getFlags(), drv.getUUID(),
                  drv.getPartitionName(), drv.getStartingBucketId(),
                  compressorClassName, drv.getEnableOffHeapMemory());
    // Make sure the combined lru args can still produce a legal eviction attributes
    // before writing them to disk.
    ((PlaceHolderDiskRegion)drv).getEvictionAttributes();
    writeRegionConfig(drv);
    
    sb.append("After modification: ");
    sb.append(lineSeparator);
    sb.append(((PlaceHolderDiskRegion)drv).dump2());
    sb.append(lineSeparator);
    
    String message = sb.toString();
    
    if (printInfo && printToConsole) {
      System.out.println(message);
    }
    return message;
  }
  
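  // Backup support: these simply delegate to the init file lock so that a
  // backup can block init file writes while the file is copied.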
  public void lockForBackup() {
    lock.lockForBackup();
  }
  
  public void unlockForBackup() {
    lock.unlockForBackup();
  }
  
  public void setBackupThread(Thread thread) {
    lock.setBackupThread(thread);
  }
  
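  /**
   * Writes an IFREC_GEMFIRE_VERSION record containing the product version
   * ordinal of the member that wrote the file.
   */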
  private void writeGemfireVersion(Version version) {
    lock.lock();
    try {
      ByteBuffer bb = getIFWriteBuffer(1+3+1);
      bb.put(IFREC_GEMFIRE_VERSION);
      Version.writeOrdinal(bb, version.ordinal(), false);
      bb.put(END_OF_RECORD_ID);
      writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
      DiskAccessException dae
        = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
      if (!this.compactInProgress) {
        this.parent.shutdownDiskStoreAndAffiliatedRegions(dae);
      }
      throw dae;
    } finally {
      lock.unlock();
    }
  }
  
  public Set getCreatedIndexIds() {
    return this.createdIndexIds;
  }

  public Set getDeletedIndexIds() {
    return this.deletedIndexIds;
  }

  // These fields are set only when the DiskInitFile is created during
  // validation of the disk store.
  private transient String inconsistencyReport = null;
  private transient String columnBufferInfo = null;

  public void setInconsistent(String ir) {
    this.inconsistencyReport = ir;
  }

  public String getInconsistencyReport() {
    return this.inconsistencyReport;
  }

  public void setColumnBufferInfo(String cbinfo) {
    this.columnBufferInfo = cbinfo;
  }

  public String getColumnBufferInfo() {
    return this.columnBufferInfo;
  }
}