/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/*
* Changes for SnappyData distributed computational and data platform.
*
* Portions Copyright (c) 2018 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.internal.cache;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import com.gemstone.gemfire.cache.DiskAccessException;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
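/**
 * Tracks the set of {@link OverflowOplog} instances owned by a single
 * {@link DiskStoreImpl}. New overflow writes go to the current "active"
 * oplog; once an oplog can no longer accept writes it is moved into a
 * separate compactable map until the compactor reclaims it. Writers
 * serialize on {@code overflowMap}; the compactable map is guarded by
 * synchronizing on {@code compactableOverflowMap}.
 *
 * <p>Illustrative use only (the real wiring lives in {@link DiskStoreImpl};
 * variable names below are placeholders):
 * <pre>
 * OverflowOplogSet oplogSet = new OverflowOplogSet(diskStore);
 * // overflow an entry's value to disk
 * oplogSet.modify(region, entry, valueWrapper, false);
 * </pre>
 */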
public class OverflowOplogSet implements OplogSet {
private final AtomicInteger overflowOplogId = new AtomicInteger(0);
private OverflowOplog lastOverflowWrite = null;
  private final ConcurrentMap<Integer, OverflowOplog> overflowMap =
      new ConcurrentHashMap<Integer, OverflowOplog>();
  private final Map<Integer, OverflowOplog> compactableOverflowMap =
      new LinkedHashMap<Integer, OverflowOplog>();
private int lastOverflowDir = 0;
private DiskStoreImpl parent;
public OverflowOplogSet(DiskStoreImpl parent) {
this.parent = parent;
}
OverflowOplog getActiveOverflowOplog() {
return this.lastOverflowWrite;
}
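  /**
   * Writes the entry's value to the current active overflow oplog, creating
   * and switching to a new overflow oplog when the active one cannot accept
   * the write.
   */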
@Override
public final void modify(LocalRegion lr, DiskEntry entry,
DiskEntry.Helper.ValueWrapper value, boolean async) {
DiskRegion dr = lr.getDiskRegion();
synchronized (this.overflowMap) {
if (this.lastOverflowWrite != null) {
if (this.lastOverflowWrite.modify(dr, entry, value, async)) {
return;
}
}
// Create a new one and put it on the front of the list.
OverflowOplog oo = createOverflowOplog(value.size());
addOverflow(oo);
this.lastOverflowWrite = oo;
boolean didIt = oo.modify(dr, entry, value, async);
assert didIt;
}
}
private long getMaxOplogSizeInBytes() {
return parent.getMaxOplogSizeInBytes();
}
private DirectoryHolder[] getDirectories() {
return parent.directories;
}
/**
* @param minSize the minimum size this oplog can be
*/
private OverflowOplog createOverflowOplog(long minSize) {
lastOverflowDir++;
if (lastOverflowDir >= getDirectories().length) {
      lastOverflowDir = 0;
}
int idx = -1;
long maxOplogSizeParam = getMaxOplogSizeInBytes();
if (maxOplogSizeParam < minSize) {
maxOplogSizeParam = minSize;
}
// first look for a directory that has room for maxOplogSize
for (int i = lastOverflowDir; i < getDirectories().length; i++) {
long availableSpace = getDirectories()[i].getAvailableSpace();
if (availableSpace >= maxOplogSizeParam) {
idx = i;
break;
}
}
if (idx == -1 && lastOverflowDir != 0) {
for (int i = 0; i < lastOverflowDir; i++) {
long availableSpace = getDirectories()[i].getAvailableSpace();
if (availableSpace >= maxOplogSizeParam) {
idx = i;
break;
}
}
}
if (idx == -1) {
// if we couldn't find one big enough for the max look for one
// that has min room
for (int i = lastOverflowDir; i < getDirectories().length; i++) {
        long availableSpace = getDirectories()[i].getAvailableSpace();
if (availableSpace >= minSize) {
idx = i;
break;
}
}
if (idx == -1 && lastOverflowDir != 0) {
for (int i = 0; i < lastOverflowDir; i++) {
          long availableSpace = getDirectories()[i].getAvailableSpace();
if (availableSpace >= minSize) {
idx = i;
break;
}
}
}
}
if (idx == -1) {
if (parent.isCompactionEnabled()) { // fix for bug 41835
idx = lastOverflowDir;
if (getDirectories()[idx].getAvailableSpace() < minSize) {
if (parent.logger.warningEnabled()) {
parent.logger.warning(LocalizedStrings.DiskRegion_COMPLEXDISKREGIONGETNEXTDIR_MAX_DIRECTORY_SIZE_WILL_GET_VIOLATED__GOING_AHEAD_WITH_THE_SWITCHING_OF_OPLOG_ANY_WAYS_CURRENTLY_AVAILABLE_SPACE_IN_THE_DIRECTORY_IS__0__THE_CAPACITY_OF_DIRECTORY_IS___1,
new Object[] {Long.valueOf(getDirectories()[idx].getUsedSpace()),
Long.valueOf(getDirectories()[idx].getCapacity())});
}
}
} else {
throw new DiskAccessException(LocalizedStrings.Oplog_DIRECTORIES_ARE_FULL_NOT_ABLE_TO_ACCOMODATE_THIS_OPERATIONSWITCHING_PROBLEM_FOR_ENTRY_HAVING_DISKID_0.toLocalizedString("needed " + minSize + " bytes"), parent);
}
}
int id = this.overflowOplogId.incrementAndGet();
lastOverflowDir = idx;
return new OverflowOplog(id, this, getDirectories()[idx], minSize);
}
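  /** Registers a newly created overflow oplog in the active map. */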
final void addOverflow(OverflowOplog oo) {
this.overflowMap.put(oo.getOplogId(), oo);
}
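  /**
   * Removes the oplog from the active map or, if it is not there, from the
   * compactable map.
   */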
final void removeOverflow(OverflowOplog oo) {
if (!basicRemoveOverflow(oo)) {
synchronized (this.compactableOverflowMap) {
this.compactableOverflowMap.remove(oo.getOplogId());
}
}
}
final boolean basicRemoveOverflow(OverflowOplog oo) {
if (this.lastOverflowWrite == oo) {
this.lastOverflowWrite = null;
}
return this.overflowMap.remove(oo.getOplogId(), oo);
}
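  /** Destroys every overflow oplog, both active and compactable. */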
public void closeOverflow() {
for (OverflowOplog oo: this.overflowMap.values()) {
oo.destroy();
}
synchronized (this.compactableOverflowMap) {
for (OverflowOplog oo: this.compactableOverflowMap.values()) {
oo.destroy();
}
}
}
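  /**
   * Removes the entry from whichever overflow oplog currently holds it, if
   * any.
   */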
  private final void removeOverflow(DiskRegion dr, DiskEntry entry) {
// find the overflow oplog that it is currently in and remove the entry from it
DiskId id = entry.getDiskId();
synchronized (id) {
long oplogId = id.setOplogId(-1);
if (oplogId != -1) {
synchronized (this.overflowMap) { // to prevent concurrent remove see bug 41646
OverflowOplog oplog = getChild((int)oplogId);
if (oplog != null) {
oplog.remove(dr, entry);
}
}
}
}
}
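  /**
   * Used during overflow compaction: re-writes a still-live entry's value
   * into the current active overflow oplog, switching to a new oplog if the
   * active one cannot accept the write.
   */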
void copyForwardForOverflowCompact(DiskEntry de,
DiskEntry.Helper.ValueWrapper value, byte userBits) {
synchronized (this.overflowMap) {
if (this.lastOverflowWrite != null) {
if (this.lastOverflowWrite.copyForwardForOverflowCompact(de, value, userBits)) {
return;
}
}
OverflowOplog oo = createOverflowOplog(value.size());
this.lastOverflowWrite = oo;
addOverflow(oo);
boolean didIt = oo.copyForwardForOverflowCompact(de, value, userBits);
assert didIt;
}
}
  public final OverflowOplog getChild(long oplogId) {
    // The oplog id is cast to an integer because the overflow
    // map uses integer oplog ids.
    return getChild((int) oplogId);
  }
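  /**
   * Looks the oplog up first in the active map and then, if not found there,
   * in the compactable map.
   */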
public final OverflowOplog getChild(int oplogId) {
OverflowOplog result = this.overflowMap.get(oplogId);
if (result == null) {
synchronized (this.compactableOverflowMap) {
result = this.compactableOverflowMap.get(oplogId);
}
}
return result;
}
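  /**
   * Creating an overflowed entry is handled the same way as modifying one,
   * so this simply delegates to {@link #modify}.
   */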
@Override
public void create(LocalRegion region, DiskEntry entry,
DiskEntry.Helper.ValueWrapper value, boolean async) {
modify(region, entry, value, async);
}
@Override
public void remove(LocalRegion region, DiskEntry entry, boolean async,
boolean isClear) {
removeOverflow(region.getDiskRegion(), entry);
}
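  /**
   * Moves the given oplog from the active map into the compactable map and
   * asks the parent disk store to schedule a compaction.
   */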
void addOverflowToBeCompacted(OverflowOplog oplog) {
synchronized (this.compactableOverflowMap) {
this.compactableOverflowMap.put(oplog.getOplogId(), oplog);
// logger.info(LocalizedStrings.DEBUG,
// "DEBUG addOverflowToBeCompacted oplog#" + oplog.getOplogId());
}
basicRemoveOverflow(oplog);
parent.scheduleCompaction();
}
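  /**
   * Adds oplogs that still need compaction to the supplied list until the
   * list reaches {@code max} entries.
   */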
  public void getCompactableOplogs(List<CompactableOplog> l, int max) {
    synchronized (this.compactableOverflowMap) {
      Iterator<OverflowOplog> itr = this.compactableOverflowMap.values().iterator();
      while (itr.hasNext() && l.size() < max) {
        OverflowOplog oplog = itr.next();
        if (oplog.needsCompaction()) {
          l.add(oplog);
        }
      }
    }
  }
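  /**
   * Test hook: closes the file channel of every overflow oplog, active and
   * compactable, ignoring any {@link IOException}.
   */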
void testHookCloseAllOverflowChannels() {
synchronized (this.overflowMap) {
for (OverflowOplog oo : this.overflowMap.values()) {
FileChannel oplogFileChannel = oo.getFileChannel();
try {
oplogFileChannel.close();
} catch (IOException ignore) {
}
}
}
synchronized (this.compactableOverflowMap) {
for (OverflowOplog oo : this.compactableOverflowMap.values()) {
FileChannel oplogFileChannel = oo.getFileChannel();
try {
oplogFileChannel.close();
} catch (IOException ignore) {
}
}
}
}
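  /** Test hook: returns all overflow oplogs, both active and compactable. */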
  ArrayList<OverflowOplog> testHookGetAllOverflowOplogs() {
    ArrayList<OverflowOplog> result = new ArrayList<OverflowOplog>();
synchronized (this.overflowMap) {
for (OverflowOplog oo : this.overflowMap.values()) {
result.add(oo);
}
}
synchronized (this.compactableOverflowMap) {
for (OverflowOplog oo : this.compactableOverflowMap.values()) {
result.add(oo);
}
}
return result;
}
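  /** Test hook: closes every overflow oplog, both active and compactable. */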
void testHookCloseAllOverflowOplogs() {
synchronized (this.overflowMap) {
for (OverflowOplog oo : this.overflowMap.values()) {
oo.close();
}
}
synchronized (this.compactableOverflowMap) {
for (OverflowOplog oo : this.compactableOverflowMap.values()) {
oo.close();
}
}
}
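  /** Returns the disk store that owns this oplog set. */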
public DiskStoreImpl getParent() {
return parent;
}
}