org.mapdb.volume.Volume Maven / Gradle / Ivy
MapDB provides concurrent Maps, Sets and Queues backed by disk storage or off-heap memory. It is a fast, scalable and easy to use embedded Java database.
/*
* Copyright (c) 2012 Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mapdb.volume;
import net.jpountz.xxhash.StreamingXXHash64;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.mapdb.CC;
import org.mapdb.DBException;
import org.mapdb.DataIO;
import org.mapdb.DataInput2;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
*
* MapDB abstraction over raw storage (file, disk partition, memory etc...).
*
*
* Implementations need to be thread safe (especially the
* {@code ensureAvailable} operation).
* However, updates do not have to be atomic; it is the client's responsibility
* to ensure that two threads are not writing to or reading from the same location.
*
*
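* A rough usage sketch (assuming a concrete implementation from this package,
* for example a ByteArrayVol, referred to here simply as {@code vol}):
* <pre>{@code
* vol.ensureAvailable(16);   // grow the storage so offsets 0..15 are writable
* vol.putLong(0, 42L);       // write a long at offset 0
* long v = vol.getLong(0);   // read it back
* vol.sync();                // flush changes to the underlying storage
* vol.close();
* }</pre>
*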
* @author Jan Kotek
*/
public abstract class Volume implements Closeable{
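/**
 * Converts a slice size in bytes into the corresponding bit shift: returns the
 * {@code i} for which {@code 1L << i} equals the size rounded up to the next power of two.
 */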
static int sliceShiftFromSize(long sizeIncrement) {
//PERF optimize this method with bitcount operation
sizeIncrement = DataIO.nextPowTwo(sizeIncrement);
for(int i=0;i<32;i++){
if((1L<<i)==sizeIncrement)
return i;
}
// fallback when no matching power of two is found
throw new AssertionError("Could not find slice shift for size increment: "+sizeIncrement);
}

/**
* If underlying storage is memory-mapped-file, this method will try to
* load and precache all file data into disk cache.
* Most likely it will call {@link MappedByteBuffer#load()},
* but it could also read the content of the entire file, etc.
* This method does not pin data in memory; the cached pages might be evicted at any time.
*
*
* @return true if this method did something, false if underlying storage does not support loading
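*
* Illustrative call (the volume name is a placeholder):
* <pre>{@code
* boolean preloaded = vol.fileLoad();   // false if preloading is not supported
* }</pre>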
*/
public boolean fileLoad(){
return false;
}
/**
* Check that all bytes between given offsets are zero. This might cross 1MB boundaries
* @param startOffset first offset to check (inclusive)
* @param endOffset offset at which to stop (exclusive)
*
* @throws DBException.DataCorruption if some byte is not zero
*/
public void assertZeroes(long startOffset, long endOffset) throws DBException.DataCorruption{
for(long offset=startOffset;offset<endOffset;offset++){
if(getByte(offset)!=0)
throw new DBException.DataCorruption("byte not zero at offset: "+offset);
}
}

/** Writes {@code value} as two bytes, high byte first (big-endian unsigned short). */
public void putUnsignedShort(long offset, int value) {
putByte(offset, (byte) (value >> 8));
putByte(offset+1, (byte) (value));
}
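/** Reads two bytes starting at {@code offset} and combines them, high byte first, into an unsigned 16-bit value. */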
public int getUnsignedShort(long offset) {
return (( (getByte(offset) & 0xff) << 8) |
( (getByte(offset+1) & 0xff)));
}
public int getUnsignedByte(long offset) {
return getByte(offset) & 0xff;
}
public void putUnsignedByte(long offset, int b) {
putByte(offset, (byte) (b & 0xff));
}
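/** Reads six bytes starting at {@code pos}, most significant byte first, and returns them as a 48-bit long. */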
public long getSixLong(long pos) {
return
((long) (getByte(pos++) & 0xff) << 40) |
((long) (getByte(pos++) & 0xff) << 32) |
((long) (getByte(pos++) & 0xff) << 24) |
((long) (getByte(pos++) & 0xff) << 16) |
((long) (getByte(pos++) & 0xff) << 8) |
((long) (getByte(pos) & 0xff));
}
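/**
 * Writes the lower six bytes of {@code value} starting at {@code pos}, most significant byte first.
 * The two highest bytes of {@code value} must be zero; when assertions ({@code CC.ASSERT}) are enabled
 * a non-zero high part triggers {@link DBException.DataCorruption}.
 */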
public void putSixLong(long pos, long value) {
if(CC.ASSERT && (value>>>48!=0))
throw new DBException.DataCorruption("six long illegal value");
putByte(pos++, (byte) (0xff & (value >> 40)));
putByte(pos++, (byte) (0xff & (value >> 32)));
putByte(pos++, (byte) (0xff & (value >> 24)));
putByte(pos++, (byte) (0xff & (value >> 16)));
putByte(pos++, (byte) (0xff & (value >> 8)));
putByte(pos, (byte) (0xff & (value)));
}
/**
* Put packed long at given position.
*
* @param pos position in this Volume to write the packed value at
* @param value value to be written
* @return number of bytes consumed by packed value
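*
* The encoding (as implemented below) writes the value seven bits at a time, most
* significant group first, and marks the final byte by setting its highest bit.
* A small illustrative sketch, assuming a writable Volume {@code vol} with space
* already ensured:
* <pre>{@code
* int n = vol.putPackedLong(0, 300L);   // 300 needs two 7-bit groups, so n == 2
* }</pre>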
*/
public int putPackedLong(long pos, long value){
//$DELAY$
int ret = 0;
int shift = 63-Long.numberOfLeadingZeros(value);
shift -= shift%7; // round down to nearest multiple of 7
while(shift!=0){
putByte(pos + (ret++), (byte) ((value >>> shift) & 0x7F));
//$DELAY$
shift-=7;
}
putByte(pos+(ret++),(byte) ((value & 0x7F)| 0x80));
return ret;
}
/**
* Unpack a long value from the Volume. The highest 4 bits of the result hold the
* number of bytes read from the Volume; use {@code result & DataIO.PACK_LONG_RESULT_MASK}
* to strip them and obtain the plain value.
*
* @param position offset to read the value from
* @return the unpacked long value, with the byte count encoded in its highest 4 bits
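*
* A small decoding sketch (assuming {@code vol} holds a value written with
* {@link #putPackedLong(long, long)} at offset 0):
* <pre>{@code
* long packed   = vol.getPackedLong(0);
* int bytesRead = (int) (packed >>> 60);                   // size kept in the top 4 bits
* long value    = packed & DataIO.PACK_LONG_RESULT_MASK;   // strip the size bits
* }</pre>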
*/
public long getPackedLong(long position){
long ret = 0;
long pos2 = 0;
byte v;
do{
v = getByte(position+(pos2++));
ret = (ret<<7 ) | (v & 0x7F);
}while((v&0x80)==0);
return (pos2<<60) | ret;
}
abstract public boolean isReadOnly();
/** returns underlying file if it exists */
abstract public File getFile();
/** returns true if this Volume holds an exclusive lock over its file */
abstract public boolean getFileLocked();
/**
* Transfers data from this Volume into target volume.
* Where possible, the implementation should override this method to enable direct memory transfer.
*
* Caller must respect slice boundaries, i.e. it is not possible to transfer data that crosses a slice boundary.
*
* @param inputOffset offset inside this Volume, i.e. data will be read from this offset
* @param target Volume to copy data into
* @param targetOffset position in target volume where data will be copied into
* @param size size of data to copy
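*
* A minimal sketch (hypothetical volumes {@code src} and {@code dst}, with
* {@code dst} already grown to hold the copied range):
* <pre>{@code
* src.copyTo(0, dst, 0, 1024);   // copy the first 1024 bytes of src into dst
* }</pre>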
*/
public void copyTo(long inputOffset, Volume target, long targetOffset, long size) {
//TODO size>Integer.MAX_VALUE
byte[] data = new byte[(int) size];
try {
getDataInput(inputOffset, (int) size).readFully(data);
}catch(IOException e){
throw new DBException.VolumeIOError(e);
}
target.putData(targetOffset,data,0, (int) size);
}
/**
* Set all bytes between {@code startOffset} and {@code endOffset} to zero.
* Area between offsets must be ready for write once clear finishes.
*/
public abstract void clear(final long startOffset, final long endOffset);
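/**
 * Variant of {@link #clear(long, long)} that splits the range into chunks of at most
 * one page ({@code 1 << CC.PAGE_SHIFT} bytes), aligned to page boundaries, so that each
 * underlying clear call stays within a single page-aligned chunk.
 */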
public void clearOverlap(final long startOffset, final long endOffset) {
if (CC.ASSERT && startOffset > endOffset)
throw new AssertionError();
final long bufSize = 1L << CC.PAGE_SHIFT;
long offset = Math.min(endOffset, DataIO.roundUp(startOffset, bufSize));
if (offset != startOffset) {
clear(startOffset, offset);
}
long prevOffset = offset;
offset = Math.min(endOffset, DataIO.roundUp(offset + 1, bufSize));
while (prevOffset < endOffset){
clear(prevOffset, offset);
prevOffset = offset;
offset = Math.min(endOffset, DataIO.roundUp(offset + 1, bufSize));
}
if(CC.ASSERT && prevOffset!=endOffset)
throw new AssertionError();
}
/**
* Copy content of this volume to another.
* The target volume might grow, but it is never shrunk.
* The target is also not synced; that is left to the caller.
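*
* A minimal sketch (hypothetical {@code source} and {@code backup} volumes):
* <pre>{@code
* source.copyTo(backup);   // backup now contains a byte-for-byte copy
* backup.sync();           // syncing the target is left to the caller
* }</pre>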
*/
public void copyTo(Volume to) {
final long volSize = length();
final long bufSize = 1L<< CC.PAGE_SHIFT;
to.ensureAvailable(volSize);
for(long offset=0;offset<volSize;offset+=bufSize){
long size = Math.min(volSize, offset+bufSize)-offset;
copyTo(offset, to, offset, size);
}
}

/**
* Calculates XXHash64 from this Volume content.
*
* @param off offset to start calculation from
* @param len length of data to calculate hash
* @param seed hash seed
* @return XXHash.
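*
* For instance, to checksum the first 4096 bytes of a volume {@code vol}
* (illustrative values, assuming the volume is at least that large):
* <pre>{@code
* long checksum = vol.hash(0, 4096, 0L);
* }</pre>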
*/
public long hash(long off, long len, long seed){
final int blen = 128;
byte[] b = new byte[blen];
StreamingXXHash64 s = CC.HASH_FACTORY.newStreamingHash64(seed);
len +=off;
//round size to multiple of blen
int size = (int)Math.min(len-off,Math.min(blen, DataIO.roundUp(off, blen) - off));
getData(off,b,0,size);
s.update(b,0,size);
off+=size;
//read rest of the data
while (off<len){
size = (int) Math.min(blen, len-off);
getData(off,b,0,size);
s.update(b,0,size);
off+=size;
}
return s.getValue();
}
// NOTE: the declaration of this file-lock helper is reconstructed; the signature and the
// enclosing retry loop are an assumption inferred from the variables and exceptions used below.
protected static FileLock lockFile(File file, FileChannel channel, boolean readOnly, long fileLockWait) {
if (readOnly)
return null; // read-only volumes do not take an exclusive file lock
while (true) {
try {
return channel.lock();
} catch (OverlappingFileLockException e) {
if (fileLockWait > 0) {
// wait until file becomes unlocked
try {
Thread.sleep(100);
} catch (InterruptedException e1) {
throw new DBException.Interrupted(e1);
}
fileLockWait -= 100;
continue; //timeout has not expired yet, try again
}
throw new DBException.FileLocked(file.toPath(), e);
} catch (IOException e) {
throw new DBException.VolumeIOError(e);
}
}
}
}