/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/*
* Changes for SnappyData distributed computational and data platform.
*
* Portions Copyright (c) 2018 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.internal.cache;
import java.io.File;
import java.io.FilenameFilter;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.HashSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import com.gemstone.gemfire.cache.DiskAccessException;
import com.gemstone.gemfire.internal.FileUtil;
import com.gemstone.gemfire.internal.cache.DiskStoreImpl.OplogEntryIdSet;
import com.gemstone.gemfire.internal.cache.persistence.DiskRecoveryStore;
import com.gemstone.gemfire.internal.cache.persistence.DiskRegionView;
import com.gemstone.gemfire.internal.cache.persistence.DiskStoreFilter;
import com.gemstone.gemfire.internal.cache.persistence.OplogType;
import com.gemstone.gemfire.internal.cache.persistence.PRPersistentConfig;
import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.sequencelog.EntryLogger;
import com.gemstone.gemfire.internal.shared.SystemProperties;
import com.gemstone.gnu.trove.TLongHashSet;
public class PersistentOplogSet implements OplogSet {
  /** The active oplog */
  protected volatile Oplog child;
  /** Used to generate sequential, unique oplogEntryIds. */
  private final AtomicLong oplogEntryId = new AtomicLong(DiskStoreImpl.INVALID_ID);
  /** Counter used for round-robin logic. */
  int dirCounter = -1;
/**
* Contains all the oplogs that only have a drf (i.e. the crf has been deleted).
*/
  final Map<Long, Oplog> drfOnlyOplogs = new LinkedHashMap<Long, Oplog>();
  /** oplogs that are ready to compact */
  final Map<Long, Oplog> oplogIdToOplog = new LinkedHashMap<Long, Oplog>();
  /** oplogs that are done being written to but not yet ready to compact */
  private final Map<Long, Oplog> inactiveOplogs = new LinkedHashMap<Long, Oplog>(16, 0.75f, true);
private final DiskStoreImpl parent;
final AtomicInteger inactiveOpenCount = new AtomicInteger();
  private final Map<Long, DiskRecoveryStore> pendingRecoveryMap
      = new HashMap<Long, DiskRecoveryStore>();
  private final Map<Long, DiskRecoveryStore> currentRecoveryMap
      = new HashMap<Long, DiskRecoveryStore>();
final AtomicBoolean alreadyRecoveredOnce = new AtomicBoolean(false);
/**
* The maximum oplog id we saw while recovering
*/
private volatile long maxRecoveredOplogId = 0;
public PersistentOplogSet(DiskStoreImpl parent) {
this.parent = parent;
}
/**
* returns the active child
*/
public final Oplog getChild() {
return this.child;
}
/**
* set the child to a new oplog
*
*/
void setChild(Oplog oplog) {
this.child = oplog;
// oplogSetAdd(oplog);
}
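  /**
   * Returns a snapshot array containing the active child followed by the
   * compactable, inactive, and drf-only oplogs; returns an empty array if
   * there are no oplogs at all.
   */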
public Oplog[] getAllOplogs() {
synchronized (this.oplogIdToOplog) {
int rollNum = this.oplogIdToOplog.size();
int inactiveNum = this.inactiveOplogs.size();
int drfOnlyNum = this.drfOnlyOplogs.size();
int num = rollNum + inactiveNum + drfOnlyNum + 1;
Oplog[] oplogs = new Oplog[num];
oplogs[0] = getChild();
{
        Iterator<Oplog> itr = this.oplogIdToOplog.values().iterator();
for (int i = 1; i <= rollNum; i++) {
oplogs[i] = itr.next();
}
}
{
        Iterator<Oplog> itr = this.inactiveOplogs.values().iterator();
for (int i = 1; i <= inactiveNum; i++) {
oplogs[i+rollNum] = itr.next();
}
}
{
        Iterator<Oplog> itr = this.drfOnlyOplogs.values().iterator();
for (int i = 1; i <= drfOnlyNum; i++) {
oplogs[i+rollNum+inactiveNum] = itr.next();
}
}
//Special case - no oplogs found
if(oplogs.length == 1 && oplogs[0] == null) {
return new Oplog[0];
}
return oplogs;
}
}
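  /**
   * Returns all non-null oplogs sorted in descending order of oplog id
   * (newest first).
   */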
  public TreeSet<Oplog> getSortedOplogs() {
    TreeSet<Oplog> result = new TreeSet<Oplog>(new Comparator<Oplog>() {
      public int compare(Oplog o1, Oplog o2) {
        return Long.signum(o2.getOplogId() - o1.getOplogId());
      }
    });
for (Oplog oplog: getAllOplogs()) {
if (oplog != null) {
result.add(oplog);
}
}
return result;
}
  /**
   * Get the oplog with the specified id.
   *
   * @param id the oplogId of the oplog to look up
   * @return the Oplog corresponding to the given oplogId, or null if no such
   *         oplog is known
   */
public Oplog getChild(long id) {
Oplog localOplog = this.child;
if (localOplog != null && id == localOplog.getOplogId()) {
return localOplog;
} else {
Long key = Long.valueOf(id);
synchronized (this.oplogIdToOplog) {
Oplog result = oplogIdToOplog.get(key);
if (result == null) {
result = inactiveOplogs.get(key);
}
return result;
}
}
}
@Override
public void create(LocalRegion region, DiskEntry entry,
DiskEntry.Helper.ValueWrapper value, boolean async) {
getChild().create(region, entry, value, async);
}
@Override
public void modify(LocalRegion region, DiskEntry entry,
DiskEntry.Helper.ValueWrapper value, boolean async) {
getChild().modify(region, entry, value, async);
}
@Override
public void remove(LocalRegion region, DiskEntry entry, boolean async,
boolean isClear) {
getChild().remove(region, entry, async, isClear);
}
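  /**
   * Forces the active oplog, if any, to roll over to a new oplog on behalf of
   * the given disk region.
   */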
public void forceRoll(DiskRegion dr) {
Oplog child = getChild();
if (child != null) {
child.forceRolling(dr, false);
}
}
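  /**
   * Scans the disk store directories for oplog files whose names match the
   * given partial file name and returns a map from each matching file to the
   * directory that holds it.
   */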
  public Map<File, DirectoryHolder> findFiles(String partialFileName) {
    this.dirCounter = 0;
    Map<File, DirectoryHolder> backupFiles = new HashMap<File, DirectoryHolder>();
FilenameFilter backupFileFilter = getFileNameFilter(partialFileName);
for (DirectoryHolder dh: parent.directories) {
File dir = dh.getDir();
File[] backupList = FileUtil.listFiles(dir, backupFileFilter);
for (File f: backupList) {
backupFiles.put(f, dh);
}
}
return backupFiles;
}
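  /**
   * Returns the filter used by findFiles to select backup oplog files that
   * match the given partial file name.
   */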
protected FilenameFilter getFileNameFilter(String partialFileName) {
return new DiskStoreFilter(OplogType.BACKUP, false, partialFileName);
}
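  /**
   * Recreates Oplog instances from the files found during recovery. Files
   * whose oplog ids are no longer present in the disk init file (i.e. already
   * deleted) are removed again instead of being recovered. If needsOplogs is
   * true, the set of recovered crf/drf ids is verified against the disk init
   * file.
   */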
  public void createOplogs(boolean needsOplogs,
      Map<File, DirectoryHolder> backupFiles) {
TLongHashSet foundCrfs = new TLongHashSet();
TLongHashSet foundDrfs = new TLongHashSet();
    for (Map.Entry<File, DirectoryHolder> entry : backupFiles.entrySet()) {
File file = entry.getKey();
String absolutePath = file.getAbsolutePath();
int underscorePosition = absolutePath.lastIndexOf("_");
int pointPosition = absolutePath.indexOf(".", underscorePosition);
String opid = absolutePath.substring(underscorePosition + 1,
pointPosition);
long oplogId = Long.parseLong(opid);
maxRecoveredOplogId = Math.max(maxRecoveredOplogId, oplogId);
      // Check the disk init file to see whether this oplog id has already been
      // deleted; if it has, don't recover the file, just remove it.
      if (Oplog.isCRFFile(file.getName())) {
        if (!isCrfOplogIdPresent(oplogId)) {
          deleteFileOnRecovery(file);
          try {
            String krfFileName = Oplog.getKRFFilenameFromCRFFilename(file.getAbsolutePath());
            File krfFile = new File(krfFileName);
            deleteFileOnRecovery(krfFile);
          } catch (Exception ex) {
            // ignore
          }
          continue; // we were unable to delete this file earlier
        }
      } else if (Oplog.isDRFFile(file.getName())) {
        if (!isDrfOplogIdPresent(oplogId)) {
          deleteFileOnRecovery(file);
          continue; // we were unable to delete this file earlier
        }
      } else if (Oplog.isIRFFile(file.getName())) {
        if (!isIrfOplogIdPresent(oplogId)) {
          deleteFileOnRecovery(file);
          continue;
        }
      }
Oplog oplog = getChild(oplogId);
if (oplog == null) {
oplog = new Oplog(oplogId, this);
//oplogSet.add(oplog);
addRecoveredOplog(oplog);
}
oplog.addRecoveredFile(file, entry.getValue(), foundCrfs, foundDrfs);
}
if(needsOplogs) {
verifyOplogs(foundCrfs, foundDrfs);
}
}
protected boolean isDrfOplogIdPresent(long oplogId) {
return parent.getDiskInitFile().isDRFOplogIdPresent(oplogId);
}
protected boolean isCrfOplogIdPresent(long oplogId) {
return parent.getDiskInitFile().isCRFOplogIdPresent(oplogId);
}
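  /**
   * An irf is only considered present if the disk init file records the
   * corresponding crf along with both a krf and an irf for this oplog id.
   */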
protected boolean isIrfOplogIdPresent(long oplogId) {
DiskInitFile initFile = parent.getDiskInitFile();
return initFile.isCRFOplogIdPresent(oplogId) && initFile.hasKrf(oplogId) && initFile.hasIrf(oplogId);
}
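  /**
   * Delegates to the disk init file to verify that the recovered crf/drf
   * oplog ids match what it expects.
   */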
protected void verifyOplogs(TLongHashSet foundCrfs, TLongHashSet foundDrfs) {
parent.getDiskInitFile().verifyOplogs(foundCrfs, foundDrfs);
}
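  /**
   * Attempts to delete a file left over from a previous run, logging the
   * deletion if it succeeds and ignoring any failure.
   */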
  private void deleteFileOnRecovery(File f) {
    try {
      if (f.delete()) {
        parent.logger.info(LocalizedStrings.DiskStoreImpl_DELETE_ON_RECOVERY,
            new Object[] { f.getName(), parent.getName() });
      }
    } catch (Exception e) {
      // ignore; one more attempt to delete the file failed
    }
  }
void addRecoveredOplog(Oplog oplog) {
basicAddToBeCompacted(oplog);
// don't schedule a compaction here. Wait for recovery to complete
}
  /**
   * Adds the oplog to the to-be-compacted set. A lock is taken on the
   * LinkedHashMap oplogIdToOplog because adding an Oplog to the map and
   * notifying the compactor thread (if it is not already compacting) must be
   * a single atomic operation. If the compactor thread is active and recovery
   * is not in progress, the compactor is notified of the addition.
   */
void addToBeCompacted(Oplog oplog) {
basicAddToBeCompacted(oplog);
parent.scheduleCompaction();
}
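  /**
   * If the oplog is not recovering and has no live values, its files are
   * closed and deleted immediately; otherwise it is removed from the inactive
   * set (if present) and added to the compactable set, with the corresponding
   * statistics updated.
   */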
private void basicAddToBeCompacted(Oplog oplog) {
if (!oplog.isRecovering() && oplog.hasNoLiveValues()) {
oplog.cancelKrf();
oplog.close(); // fix for bug 41687
oplog.deleteFiles(oplog.getHasDeletes());
} else {
int inactivePromotedCount = 0;
parent.getStats().incCompactableOplogs(1);
Long key = Long.valueOf(oplog.getOplogId());
synchronized (this.oplogIdToOplog) {
if (this.inactiveOplogs.remove(key) != null) {
if (oplog.isRAFOpen()) {
inactiveOpenCount.decrementAndGet();
}
inactivePromotedCount++;
}
// logger.info(LocalizedStrings.DEBUG, "DEBUG addToBeCompacted #" + oplog.getOplogId());
this.oplogIdToOplog.put(key, oplog);
}
if (inactivePromotedCount > 0) {
parent.getStats().incInactiveOplogs(-inactivePromotedCount);
}
}
}
private static class ValidateModeColocationChecker {
    // Each element of the list is one colocation map.
    // Each such map has colocated PR names as keys and a list of bucket ids as values.
private final List