/*
* Copyright (c) 2012 Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mapdb;
import org.mapdb.EngineWrapper.ReadOnlyEngine;
import java.io.File;
import java.io.IOError;
import java.io.IOException;
import java.util.*;
/**
* A builder class for creating and opening a database.
*
* @author Jan Kotek
*/
public class DBMaker<DBMakerT extends DBMaker<DBMakerT>> {
protected final String TRUE = "true";
protected interface Keys{
String cache = "cache";
String cacheSize = "cacheSize";
String cache_disable = "disable";
String cache_hashTable = "hashTable";
String cache_hardRef = "hardRef";
String cache_softRef = "softRef";
String cache_weakRef = "weakRef";
String cache_lru = "lru";
String file = "file";
String volume = "volume";
String volume_raf = "raf";
String volume_rafIfNeeded = "rafIfNeeded";
String volume_rafIndexMapped = "rafIndexMapped";
String volume_mmapf = "mmapf";
String volume_heap = "heap";
String volume_offheap = "offheap";
String store = "store";
String store_direct = "direct";
String store_wal = "wal";
String store_append = "append";
String store_heap = "heap";
String transactionDisable = "transactionDisable";
String asyncWriteDisable = "asyncWriteDisable";
String asyncFlushDelay = "asyncFlushDelay";
String deleteFilesAfterClose = "deleteFilesAfterClose";
String closeOnJvmShutdown = "closeOnJvmShutdown";
String readOnly = "readOnly";
String compression = "compression";
String compression_lzf = "lzf";
String encryptionKey = "encryptionKey";
String encryption = "encryption";
String encryption_xtea = "xtea";
String checksum = "checksum";
String freeSpaceReclaimQ = "freeSpaceReclaimQ";
String syncOnCommitDisable = "syncOnCommitDisable";
String snapshots = "snapshots";
String strictDBGet = "strictDBGet";
String fullChunkAllocation = "fullChunkAllocation";
String sizeLimit = "sizeLimit";
String fullTx = "fullTx";
}
protected Properties props = new Properties();
/** use static factory methods, or make subclass */
protected DBMaker(){}
protected DBMaker(File file) {
props.setProperty(Keys.file, file.getPath());
}
/** Creates a new in-memory database. Changes are lost after the JVM exits.
*
* This will use HEAP memory, so the Garbage Collector is affected.
*/
public static DBMaker newMemoryDB(){
return new DBMaker()._newMemoryDB();
}
public DBMakerT _newMemoryDB(){
props.setProperty(Keys.volume,Keys.volume_heap);
return getThis();
}
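/*
 * A minimal usage sketch for the in-memory mode, assuming the builder and DB
 * accessor methods referenced in this file; the map name "example" is illustrative:
 *
 *   DB db = DBMaker.newMemoryDB().make();
 *   Map<Integer,String> map = db.getHashMap("example");
 *   map.put(1, "one");
 *   db.commit();  // transaction journal is on by default
 *   db.close();   // heap-backed data is gone once the JVM exits
 */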
/** Creates a new in-memory database. Changes are lost after the JVM exits.
*
* This will use DirectByteBuffer outside of HEAP, so the Garbage Collector is not affected.
*
*/
public static DBMaker newDirectMemoryDB(){
return new DBMaker()._newDirectMemoryDB();
}
public DBMakerT _newDirectMemoryDB() {
props.setProperty(Keys.volume,Keys.volume_offheap);
return getThis();
}
/**
* Creates or opens an append-only database stored in a file.
* This database uses a different storage format than the usual file db.
*
* @param file storage file
* @return maker
*/
public static DBMaker newAppendFileDB(File file) {
return new DBMaker()._newAppendFileDB(file);
}
public DBMakerT _newAppendFileDB(File file) {
props.setProperty(Keys.file, file.getPath());
props.setProperty(Keys.store, Keys.store_append);
return getThis();
}
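/*
 * A sketch of opening an append-only store, assuming the builder methods in this
 * file; the file name "append.db" is illustrative:
 *
 *   DB db = DBMaker.newAppendFileDB(new File("append.db"))
 *           .closeOnJvmShutdown()
 *           .make();
 */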
/**
* Creates a new BTreeMap backed by temporary file storage.
* This is a quick way to create a 'throw away' collection.
*
* Storage is created in a temp folder and deleted on JVM shutdown.
*/
public static <K,V> BTreeMap<K,V> newTempTreeMap(){
return newTempFileDB()
.deleteFilesAfterClose()
.closeOnJvmShutdown()
.transactionDisable()
.make()
.getTreeMap("temp");
}
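/*
 * A sketch of a throw-away sorted map via newTempTreeMap() above; the key and
 * value types are illustrative:
 *
 *   BTreeMap<Long,String> map = DBMaker.newTempTreeMap();
 *   map.put(1L, "one");  // backed by a temp file, deleted on JVM shutdown
 */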
/**
* Creates a new HTreeMap backed by temporary file storage.
* This is a quick way to create a 'throw away' collection.
*
* Storage is created in a temp folder and deleted on JVM shutdown.
*/
public static <K,V> HTreeMap<K,V> newTempHashMap(){
return newTempFileDB()
.deleteFilesAfterClose()
.closeOnJvmShutdown()
.transactionDisable()
.make()
.getHashMap("temp");
}
/**
* Creates a new TreeSet backed by temporary file storage.
* This is a quick way to create a 'throw away' collection.
*
* Storage is created in a temp folder and deleted on JVM shutdown.
*/
public static <K> NavigableSet<K> newTempTreeSet(){
return newTempFileDB()
.deleteFilesAfterClose()
.closeOnJvmShutdown()
.transactionDisable()
.make()
.getTreeSet("temp");
}
/**
* Creates a new HashSet backed by temporary file storage.
* This is a quick way to create a 'throw away' collection.
*
* Storage is created in a temp folder and deleted on JVM shutdown.
*/
public static <K> Set<K> newTempHashSet(){
return newTempFileDB()
.deleteFilesAfterClose()
.closeOnJvmShutdown()
.transactionDisable()
.make()
.getHashSet("temp");
}
/**
* Creates a new database in a temporary folder.
*/
public static DBMaker newTempFileDB() {
try {
return newFileDB(File.createTempFile("mapdb-temp","db"));
} catch (IOException e) {
throw new IOError(e);
}
}
/** Creates or opens a database stored in a file. */
public static DBMaker newFileDB(File file){
return new DBMaker(file);
}
public DBMakerT _newFileDB(File file){
props.setProperty(Keys.file, file.getPath());
return getThis();
}
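/*
 * A typical file-backed lifecycle, assuming the builder methods in this file; the
 * file name "mydb" and map name "counters" are illustrative:
 *
 *   DB db = DBMaker.newFileDB(new File("mydb"))
 *           .closeOnJvmShutdown()
 *           .make();
 *   Map<String,Long> map = db.getTreeMap("counters");
 *   map.put("hits", 1L);
 *   db.commit();  // persist changes
 *   db.close();
 */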
protected DBMakerT getThis(){
return (DBMakerT)this;
}
/**
* The transaction journal is enabled by default.
* You must call DB.commit() to save your changes.
* It is possible to disable the transaction journal for better write performance.
* In this case all integrity checks are sacrificed for speed.
*
* If the transaction journal is disabled, all changes are written DIRECTLY into the store.
* You must call DB.close() before exit,
* otherwise your store WILL BE CORRUPTED.
*
*
* @return this builder
*/
public DBMakerT transactionDisable(){
props.put(Keys.transactionDisable,TRUE);
return getThis();
}
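/*
 * A sketch of the trade-off described above: with the journal disabled there is no
 * commit()/rollback(), and close() is mandatory. The file name is illustrative:
 *
 *   DB db = DBMaker.newFileDB(new File("fast.db"))
 *           .transactionDisable()  // writes go directly into the store
 *           .make();
 *   try {
 *       db.getHashMap("data").put("k", "v");
 *   } finally {
 *       db.close();  // skipping this may corrupt the store
 *   }
 */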
/**
* Instance cache is enabled by default.
* It greatly decreases serialization overhead and improves performance.
* Call this method to disable the instance cache, so objects are always deserialized anew.
*
* This may work around some problems.
*
* @return this builder
*/
public DBMakerT cacheDisable(){
props.put(Keys.cache,Keys.cache_disable);
return getThis();
}
/**
* Enables unbounded hard reference cache.
* This cache is good if you have a lot of available memory.
*
* All fetched records are added to a HashMap and held with hard references.
* To prevent OutOfMemoryError, MapDB monitors free memory;
* if it drops below 25%, the cache is cleared.
*
* @return this builder
*/
public DBMakerT cacheHardRefEnable(){
props.put(Keys.cache,Keys.cache_hardRef);
return getThis();
}
/**
* Enables unbounded cache which uses {@code WeakReference}.
* Items are removed from the cache by the Garbage Collector.
*
* @return this builder
*/
public DBMakerT cacheWeakRefEnable(){
props.put(Keys.cache,Keys.cache_weakRef);
return getThis();
}
/**
* Enables unbounded cache which uses {@code SoftReference}.
* Items are removed from the cache by the Garbage Collector.
*
* @return this builder
*/
public DBMakerT cacheSoftRefEnable(){
props.put(Keys.cache,Keys.cache_softRef);
return getThis();
}
/**
* Enables Least Recently Used cache. It is a fixed-size cache that evicts the least recently used items to make space.
*
* @return this builder
*/
public DBMakerT cacheLRUEnable(){
props.put(Keys.cache,Keys.cache_lru);
return getThis();
}
/**
* Enables compatibility storage mode for 32bit JVMs.
*
* By default MapDB uses memory mapped files. However, a 32bit JVM can only address 4GB of memory.
* Also some older JVMs do not handle large memory mapped files well.
* We can use {@code FileChannel}, which does not use memory mapped files but is slower.
* Use this if you are experiencing java.lang.OutOfMemoryError: Map failed exceptions.
*
* This option disables memory mapped files but makes storage slower.
*/
public DBMakerT randomAccessFileEnable() {
props.setProperty(Keys.volume,Keys.volume_raf);
return getThis();
}
/** Same as {@code randomAccessFileEnable()}, but part of the storage is kept memory mapped.
* This mode is a good performance compromise between memory mapped files and RAF.
*
* The index file is typically 5% of the storage. It contains small, frequently read values,
* which is where memory mapped files excel.
*
* With this mode you will eventually experience java.lang.OutOfMemoryError: Map failed
* exceptions, but the storage size limit is pushed to somewhere around 40GB.
*
*/
public DBMakerT randomAccessFileEnableKeepIndexMapped() {
props.setProperty(Keys.volume,Keys.volume_rafIndexMapped);
return getThis();
}
/**
* Checks the current JVM for known problems. If the JVM does not handle large memory mapped files well, this option
* disables memory mapped files and uses the safer but slower {@code RandomAccessFile} instead.
*/
public DBMakerT randomAccessFileEnableIfNeeded() {
props.setProperty(Keys.volume,Keys.volume_rafIfNeeded);
return getThis();
}
/**
* Sets the cache size. Interpretation depends on the cache type.
* For fixed-size caches (such as the FixedHashTable cache) it is the maximal number of items in the cache.
*
* For unbounded caches (such as the HardRef cache) it is the initial capacity of the underlying table (HashMap).
*
* The default cache size is 32768.
*
* @param cacheSize new cache size
* @return this builder
*/
public DBMakerT cacheSize(int cacheSize){
props.setProperty(Keys.cacheSize,""+cacheSize);
return getThis();
}
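/*
 * A sketch combining a cache type with a custom size, assuming the cache builder
 * methods declared above; the size 1 << 20 is illustrative:
 *
 *   DB db = DBMaker.newFileDB(new File("cached.db"))
 *           .cacheLRUEnable()    // bounded cache, evicts least recently used items
 *           .cacheSize(1 << 20)  // maximal number of items for fixed-size caches
 *           .make();
 */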
/**
* MapDB supports snapshots. `TxEngine` requires additional locking which has a small overhead even when snapshots are not used.
* Snapshots are disabled by default. This option switches snapshots on.
*
* @return this builder
*/
public DBMakerT snapshotEnable(){
props.setProperty(Keys.snapshots,TRUE);
return getThis();
}
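/*
 * A sketch of working with snapshots. It assumes DB exposes a read-only snapshot()
 * view once this option is enabled (an assumption; only the builder switch is
 * defined in this file):
 *
 *   DB db = DBMaker.newMemoryDB().snapshotEnable().make();
 *   Map<Integer,String> map = db.getHashMap("data");
 *   map.put(1, "one");
 *   DB snapshot = db.snapshot();  // point-in-time, read-only view
 *   map.put(2, "two");            // not visible through the snapshot
 */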
/**
* By default all modifications are queued and written to disk by the Background Writer Thread,
* so all modifications are performed asynchronously and do not block.
*
* It is possible to disable the Background Writer Thread, but this greatly hurts concurrency.
* Without async writes, all threads block until all previous writes have finished (single big lock).
*
* This may work around some problems.
*
* @return this builder
*/
public DBMakerT asyncWriteDisable(){
props.setProperty(Keys.asyncWriteDisable,TRUE);
return getThis();
}
/**
* Sets the flush interval for the write cache; the default is 0.
*
* When a BTreeMap is constructed from an ordered set, tree node size grows linearly with each
* item added. Each time a new key is added to a tree node, its size changes and
* storage needs to find a new place for it. So constructing a BTreeMap from an ordered set
* leads to heavy store fragmentation.
*
* Setting a flush interval is a workaround: the BTreeMap node is updated in memory (write cache)
* and only the final version of the node is stored on disk.
*
*
* @param delay flush the write cache every N milliseconds
* @return this builder
*/
public DBMakerT asyncFlushDelay(int delay){
props.setProperty(Keys.asyncFlushDelay,""+delay);
return getThis();
}
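/*
 * A sketch of the bulk-load workaround described above, assuming the builder
 * methods in this file; the 100 ms delay and map name are illustrative:
 *
 *   DB db = DBMaker.newFileDB(new File("bulk.db"))
 *           .asyncFlushDelay(100)  // keep hot BTree nodes in the write cache
 *           .make();
 *   BTreeMap<Integer,String> map = db.getTreeMap("sorted");
 *   for(int i = 0; i < 1000000; i++)
 *       map.put(i, "value" + i);   // ordered inserts; only final nodes reach disk
 */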
/**
* Tries to delete files after the DB is closed.
* File deletion may silently fail, especially on Windows where a buffer needs to be unmapped before the file can be deleted.
*
* @return this builder
*/
public DBMakerT deleteFilesAfterClose(){
props.setProperty(Keys.deleteFilesAfterClose,TRUE);
return getThis();
}
/**
* Adds a JVM shutdown hook and closes the DB just before the JVM exits.
*
* @return this builder
*/
public DBMakerT closeOnJvmShutdown(){
props.setProperty(Keys.closeOnJvmShutdown,TRUE);
return getThis();
}
/**
* Enables record compression.
*
* Make sure you enable this every time you reopen the store, otherwise record de-serialization fails unpredictably.
*
* @return this builder
*/
public DBMakerT compressionEnable(){
props.setProperty(Keys.compression,Keys.compression_lzf);
return getThis();
}
/**
* Encrypts storage using the XTEA algorithm.
*
* XTEA is a sound encryption algorithm; however, the implementation in MapDB has not been peer-reviewed.
* MapDB only encrypts record data, so an attacker may see the number of records and their sizes.
*
* Make sure you enable this every time you reopen the store, otherwise record de-serialization fails unpredictably.
*
* @param password for encryption
* @return this builder
*/
public DBMakerT encryptionEnable(String password){
return encryptionEnable(password.getBytes(Utils.UTF8_CHARSET));
}
/**
* Encrypts storage using the XTEA algorithm.
*
* XTEA is a sound encryption algorithm; however, the implementation in MapDB has not been peer-reviewed.
* MapDB only encrypts record data, so an attacker may see the number of records and their sizes.
*
* Make sure you enable this every time you reopen the store, otherwise record de-serialization fails unpredictably.
*
* @param password for encryption
* @return this builder
*/
public DBMakerT encryptionEnable(byte[] password){
props.setProperty(Keys.encryption, Keys.encryption_xtea);
props.setProperty(Keys.encryptionKey,Utils.toHexa(password));
return getThis();
}
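/*
 * A sketch of an encrypted store, assuming the builder methods in this file. As
 * the javadoc warns, the same settings must be passed on every reopen:
 *
 *   DB db = DBMaker.newFileDB(new File("secret.db"))
 *           .encryptionEnable("password")  // XTEA, record data only
 *           .compressionEnable()
 *           .checksumEnable()
 *           .make();
 */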
/**
* Adds an Adler32 checksum at the end of each record to check data integrity.
* It throws IOException("Checksum does not match, data broken") on de-serialization if the data is corrupted.
*
* Make sure you enable this every time you reopen the store, otherwise record de-serialization fails.
*
* @return this builder
*/
public DBMakerT checksumEnable(){
props.setProperty(Keys.checksum,TRUE);
return getThis();
}
/**
* DB get methods such as {@link DB#getTreeMap(String)} or {@link DB#getAtomicLong(String)} auto-create
* a new record with default values if a record with the given name does not exist. This could be a problem
* if you would like to enforce a stricter database schema. This parameter disables record auto-creation.
*
* If this is set, `DB.getXX()` will throw an exception if the given name does not exist, instead of creating a new record (or collection).
*
* @return this builder
*/
public DBMakerT strictDBGet(){
props.setProperty(Keys.strictDBGet,TRUE);
return getThis();
}
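/*
 * A sketch of strict mode, assuming the builder methods in this file:
 *
 *   DB db = DBMaker.newMemoryDB().strictDBGet().make();
 *   db.getTreeMap("missing");  // throws instead of auto-creating the collection
 */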
/**
* Opens the store in read-only mode. Any modification attempt will throw
* UnsupportedOperationException("Read-only")
*
* @return this builder
*/
public DBMakerT readOnly(){
props.setProperty(Keys.readOnly,TRUE);
return getThis();
}
/**
* Sets the free space reclaim Q. It is a value from 0 to 10, indicating how eagerly MapDB
* searches for free space inside the store to reuse before expanding the store file.
* 0 means that no free space will be reused and the store file will just grow (effectively append only).
* 10 means that MapDB tries really hard to reuse free space, even if it may hurt performance.
* The default value is 5.
*
*
* @return this builder
*/
public DBMakerT freeSpaceReclaimQ(int q){
if(q<0||q>10) throw new IllegalArgumentException("wrong Q");
props.setProperty(Keys.freeSpaceReclaimQ,""+q);
return getThis();
}
/**
* Enables power saving mode.
* Typically MapDB runs daemon threads in an infinite loop with delays and spin locks:
*
* while(true){
* Thread.sleep(1000);
* doSomething();
* }
*
* while(!write_finished){
* write_chunk;
* sleep(10 nanoseconds) //so OS gets a chance to finish async writing
* }
*
*
* This brings a bit more stability (prevents deadlocks) and some extra speed.
* However, it causes higher CPU usage than necessary, and the CPU wakes up every
* N seconds.
*
* On power constrained devices (phones, laptops..) trading speed for energy
* consumption is not desired. So this setting tells MapDB to prefer
* energy efficiency over speed and stability. This is a global setting, so
* it may affect any MapDB part where it makes sense.
*
* Currently it is used only in {@link AsyncWriteEngine}, where the power setting
* may prevent the Background Writer Thread from exiting if the main thread dies.
*
* @return this builder
*/
// public DBMaker powerSavingModeEnable(){
// this._powerSavingMode = true;
// return this;
// }
/**
* Disables file sync on commit. This way transactions are preserved (rollback works),
* but commits are not 'durable': data may be lost if the store is not properly closed.
* The file store is properly synced when closed.
* Disabling sync makes commits faster.
*
* @return this builder
*/
public DBMakerT syncOnCommitDisable(){
props.setProperty(Keys.syncOnCommitDisable,TRUE);
return getThis();
}
/**
* Sets the store size limit. Disk or memory space consumed by the store should not grow beyond this limit.
* The limit is not strict and does not apply to some parts such as the index table. The actual store size
* might be 10% or more bigger.
*
*
* @param maxSize maximal store size in GB
* @return this builder
*/
public DBMakerT sizeLimit(double maxSize){
long size = (long) (maxSize * 1024D*1024D*1024D);
props.setProperty(Keys.sizeLimit,""+size);
return getThis();
}
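/*
 * The conversion above is a plain GB-to-bytes multiplication; for example
 * sizeLimit(2) stores 2 * 1024 * 1024 * 1024 = 2147483648 bytes as the soft limit.
 */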
/**
* Allocates new space in 1GB chunks rather than small increments.
*
* This will consume more space (not-yet-used space is allocated up front),
* but it will also improve performance and reduce remapping.
*
*/
public DBMakerT fullChunkAllocationEnable(){
props.setProperty(Keys.fullChunkAllocation,TRUE);
return getThis();
}
/** constructs DB using current settings */
public DB make(){
return new DB(makeEngine(), propsGetBool(Keys.strictDBGet));
}
public TxMaker makeTxMaker(){
props.setProperty(Keys.fullTx,TRUE);
snapshotEnable();
asyncWriteDisable();
Engine e = makeEngine();
if(!(e instanceof TxEngine)) throw new IllegalArgumentException("Snapshot must be enabled for TxMaker");
//init catalog if needed
DB db = new DB(e);
db.commit();
return new TxMaker((TxEngine) e);
}
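/*
 * A sketch of TxMaker usage. It assumes TxMaker hands out a DB per transaction
 * via makeTx(), as suggested by the TxEngine wiring above (makeTx() itself is
 * not defined in this file):
 *
 *   TxMaker txMaker = DBMaker.newMemoryDB().makeTxMaker();
 *   DB tx = txMaker.makeTx();
 *   tx.getHashMap("data").put("k", "v");
 *   tx.commit();  // or tx.rollback()
 */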
/** constructs Engine using current settings */
public Engine makeEngine(){
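//the engine is assembled as a chain of decorators, roughly:
//raw store (direct/WAL/append) -> async writer -> instance cache
//-> snapshot/TxEngine -> read-only wrapper; the extend*() hooks below
//let subclasses replace or wrap each layer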
final boolean readOnly = propsGetBool(Keys.readOnly);
final File file = props.containsKey(Keys.file)? new File(props.getProperty(Keys.file)):null;
final String volume = props.getProperty(Keys.volume);
final String store = props.getProperty(Keys.store);
if(readOnly && file==null)
throw new UnsupportedOperationException("Can not open in-memory DB in read-only mode.");
if(readOnly && !file.exists() && !Keys.store_append.equals(store)){
throw new UnsupportedOperationException("Can not open non-existing file in read-only mode.");
}
if(propsGetLong(Keys.sizeLimit,0)>0 && Keys.store_append.equals(store))
throw new UnsupportedOperationException("Append-Only store does not support Size Limit");
extendArgumentCheck();
Engine engine;
if(!Keys.store_append.equals(store)){
Volume.Factory folFac = extendStoreVolumeFactory();
engine = propsGetBool(Keys.transactionDisable) ?
extendStoreDirect(folFac):
extendStoreWAL(folFac);
}else{
if(Keys.volume_heap.equals(volume)||Keys.volume_offheap.equals(volume))
throw new UnsupportedOperationException("Append Storage format is not supported with in-memory dbs");
engine = extendStoreAppend();
}
engine = extendWrapStore(engine);
if(!propsGetBool(Keys.asyncWriteDisable) && !readOnly){
engine = extendAsyncWriteEngine(engine);
}
final String cache = props.getProperty(Keys.cache, CC.DEFAULT_CACHE);
if(Keys.cache_disable.equals(cache)){
//do not wrap engine in cache
}else if(Keys.cache_hashTable.equals(cache)){
engine = extendCacheHashTable(engine);
}else if (Keys.cache_hardRef.equals(cache)){
engine = extendCacheHardRef(engine);
}else if (Keys.cache_weakRef.equals(cache)){
engine = extendCacheWeakRef(engine);
}else if (Keys.cache_softRef.equals(cache)){
engine = extendCacheSoftRef(engine);
}else if (Keys.cache_lru.equals(cache)){
engine = extendCacheLRU(engine);
}else{
throw new IllegalArgumentException("unknown cache type: "+cache);
}
engine = extendWrapCache(engine);
if(propsGetBool(Keys.snapshots))
engine = extendSnapshotEngine(engine);
engine = extendWrapSnapshotEngine(engine);
if(readOnly)
engine = new ReadOnlyEngine(engine);
if(propsGetBool(Keys.closeOnJvmShutdown)){
final Engine engine2 = engine;
Runtime.getRuntime().addShutdownHook(new Thread("MapDB shutdown") {
@Override
public void run() {
if(!engine2.isClosed())
extendShutdownHookBefore(engine2);
engine2.close();
extendShutdownHookAfter(engine2);
}
});
}
//try to read one record from DB, to make sure encryption and compression are correctly set.
Fun.Tuple2<Integer,String> check = null;
try{
check = (Fun.Tuple2<Integer,String>) engine.get(Engine.CHECK_RECORD, Serializer.BASIC);
if(check!=null){
if(check.a.intValue()!= check.b.hashCode())
throw new RuntimeException("invalid checksum");
}
}catch(Throwable e){
throw new IllegalArgumentException("Error while opening store. Make sure you have right password, compression or encryption is well configured.",e);
}
if(check == null && !engine.isReadOnly()){
//new db, so insert testing record
String s = Utils.randomString(127); //random string so it is not that easy to decrypt
check = Fun.t2(s.hashCode(), s);
engine.update(Engine.CHECK_RECORD, check, Serializer.BASIC);
engine.commit();
}
return engine;
}
protected int propsGetInt(String key, int defValue){
String ret = props.getProperty(key);
if(ret==null) return defValue;
return Integer.valueOf(ret);
}
protected long propsGetLong(String key, long defValue){
String ret = props.getProperty(key);
if(ret==null) return defValue;
return Long.valueOf(ret);
}
protected boolean propsGetBool(String key){
String ret = props.getProperty(key);
return ret!=null && ret.equals(TRUE);
}
protected byte[] propsGetXteaEncKey(){
if(!Keys.encryption_xtea.equals(props.getProperty(Keys.encryption)))
return null;
return Utils.fromHexa(props.getProperty(Keys.encryptionKey));
}
protected int propsGetRafMode(){
String volume = props.getProperty(Keys.volume);
if(volume==null||Keys.volume_mmapf.equals(volume)){
return 0;
}else if(Keys.volume_rafIfNeeded.equals(volume)){
return Utils.JVMSupportsLargeMappedFiles()?0:2;
}else if(Keys.volume_rafIndexMapped.equals(volume)){
return 1;
}else if(Keys.volume_raf.equals(volume)){
return 2;
}
return 0;
}
protected void extendShutdownHookBefore(Engine engine) {
}
protected void extendShutdownHookAfter(Engine engine) {
}
protected TxEngine extendSnapshotEngine(Engine engine) {
return new TxEngine(engine,propsGetBool(Keys.fullTx));
}
protected Caches.LRU extendCacheLRU(Engine engine) {
int cacheSize = propsGetInt(Keys.cacheSize, CC.DEFAULT_CACHE_SIZE);
return new Caches.LRU(engine, cacheSize);
}
protected Caches.WeakSoftRef extendCacheWeakRef(Engine engine) {
return new Caches.WeakSoftRef(engine,true);
}
protected Caches.WeakSoftRef extendCacheSoftRef(Engine engine) {
return new Caches.WeakSoftRef(engine,false);
}
protected Caches.HardRef extendCacheHardRef(Engine engine) {
int cacheSize = propsGetInt(Keys.cacheSize, CC.DEFAULT_CACHE_SIZE);
return new Caches.HardRef(engine,cacheSize);
}
protected Caches.HashTable extendCacheHashTable(Engine engine) {
int cacheSize = propsGetInt(Keys.cacheSize, CC.DEFAULT_CACHE_SIZE);
return new Caches.HashTable(engine, cacheSize);
}
protected AsyncWriteEngine extendAsyncWriteEngine(Engine engine) {
return new AsyncWriteEngine(engine,
propsGetInt(Keys.asyncFlushDelay,CC.ASYNC_WRITE_FLUSH_DELAY),
null);
}
protected void extendArgumentCheck() {
}
protected Engine extendWrapStore(Engine engine) {
return engine;
}
protected Engine extendWrapCache(Engine engine) {
return engine;
}
protected Engine extendWrapSnapshotEngine(Engine engine) {
return engine;
}
protected StoreAppend extendStoreAppend() {
final File file = props.containsKey(Keys.file)? new File(props.getProperty(Keys.file)):null;
boolean compressionEnabled = Keys.compression_lzf.equals(props.getProperty(Keys.compression));
return new StoreAppend(file, propsGetRafMode()>0, propsGetBool(Keys.readOnly),
propsGetBool(Keys.transactionDisable),
propsGetBool(Keys.deleteFilesAfterClose),
propsGetBool(Keys.syncOnCommitDisable),
propsGetBool(Keys.checksum),compressionEnabled,propsGetXteaEncKey());
}
protected Store extendStoreDirect(Volume.Factory folFac) {
boolean compressionEnabled = Keys.compression_lzf.equals(props.getProperty(Keys.compression));
return new StoreDirect(folFac, propsGetBool(Keys.readOnly),
propsGetBool(Keys.deleteFilesAfterClose),
propsGetInt(Keys.freeSpaceReclaimQ,CC.DEFAULT_FREE_SPACE_RECLAIM_Q),
propsGetBool(Keys.syncOnCommitDisable),propsGetLong(Keys.sizeLimit,0),
propsGetBool(Keys.checksum),compressionEnabled,propsGetXteaEncKey(),
propsGetBool(Keys.fullChunkAllocation));
}
protected Store extendStoreWAL(Volume.Factory folFac) {
boolean compressionEnabled = Keys.compression_lzf.equals(props.getProperty(Keys.compression));
return new StoreWAL(folFac, propsGetBool(Keys.readOnly),propsGetBool(Keys.deleteFilesAfterClose),
propsGetInt(Keys.freeSpaceReclaimQ,CC.DEFAULT_FREE_SPACE_RECLAIM_Q),
propsGetBool(Keys.syncOnCommitDisable),propsGetLong(Keys.sizeLimit,-1),
propsGetBool(Keys.checksum),compressionEnabled,propsGetXteaEncKey(),
propsGetBool(Keys.fullChunkAllocation) );
}
protected Volume.Factory extendStoreVolumeFactory() {
long sizeLimit = propsGetLong(Keys.sizeLimit,0);
boolean fullChunkAlloc = propsGetBool(Keys.fullChunkAllocation);
String volume = props.getProperty(Keys.volume);
if(Keys.volume_heap.equals(volume))
return Volume.memoryFactory(false,sizeLimit, fullChunkAlloc);
else if(Keys.volume_offheap.equals(volume))
return Volume.memoryFactory(true,sizeLimit, fullChunkAlloc);
File file = new File(props.getProperty(Keys.file));
return Volume.fileFactory(propsGetBool(Keys.readOnly), propsGetRafMode(), file,
sizeLimit, fullChunkAlloc);
}
}