// Source: net.algart.arrays.DefaultDataFileModel (AlgART library, published via Maven / Gradle / Ivy)
/*
* The MIT License (MIT)
*
* Copyright (c) 2007-2024 Daniel Alievsky, AlgART Laboratory (http://algart.net)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.algart.arrays;
import java.io.*;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
/**
* Default implementation of {@link DataFileModel} that creates usual Java files,
* which are mapped via standard Java technique (FileChannel.map
method).
*
* The {@link DataFile data files}, returned by this class, creates
* {@link DataFile.BufferHolder buffer holders} with the
* {@link DataFile.BufferHolder#unmap(boolean) unmap(boolean)} method which does not perform
* actual unmapping: Java NIO package does not support unmapping.
* File mapping will be released automatically by the built-in finalizers.
*
* The {@link DataFile#close() close()} method in data files, returned by this class,
* perform closing via RandomAccessFile.close()
method,
* but it may not completely close the disk file!
* The disk file will be completely closed and all connected system resources will be freed
* only while the following garbage collection at the unspecified moment,
* or while exiting JVM.
* So, if you need to process a very large number of AlgART arrays (tens of thousands or millions),
* we recommend to use the {@link StandardIODataFileModel} and
* call {@link Array#freeResources(ArrayContext)} method in time.
*
* See comments to {@link #createTemporary(boolean)} method for information about temporary
* files created by this class.
*
* Warning: under Sun Java versions before 1.7 (i.e. 1.5 and 1.6),
* this data file model is not stable,
* due to the Sun's bug
* "(fc) "Cleaner terminated abnormally" error in simple mapping test".
* More precisely, for Java prior to 1.7:
*
*
* - In {@link #isLazyWriting() lazy-writing mode}, this model is unstable at all:
* processing large arrays can lead to internal error while garbage collection,
* that will lead to immediate abnormal JVM termination.
* Due to this reason, the {@link #defaultLazyWriting() default lazy-writing mode} is
false
* in Java versions prior to 1.7, but true
in Java 1.7+.
*
* - In usual mode, this model can occasionally lead to unexpected
IOError
* while processing large arrays. Unlike an internal error in the lazy-writing mode,
* this exception can be normally caught and shown to the user in GUI applications.
* It can occur with large {@link #recommendedBankSize(boolean) bank size}
* (32 MB or more), if an array occupies several banks.
* This probability of this situation is not too high for unresizable arrays
* when {@link #recommendedSingleMappingLimit() single mapping} is used.
*
*
* This class is immutable and thread-safe:
* there are no ways to modify settings of the created instance.
*
* @author Daniel Alievsky
* @see StandardIODataFileModel
*/
public class DefaultDataFileModel extends AbstractDataFileModel implements DataFileModel {
// -------------------------------------------------------------------------------------------------
// Tuning constants. Each value is read from its system property ONCE, during class initialization,
// and normalized (Math.max/Math.min clamping, or nearestCorrect... correction to a valid value).
// -------------------------------------------------------------------------------------------------
// Recommended number of banks: numberOfBanksPerCPU (default 3) * available processors,
// computed in long arithmetic to avoid int overflow, then corrected to a valid bank count.
private static final int DEFAULT_NUMBER_OF_BANKS =
MappedDataStorages.MappingSettings.nearestCorrectNumberOfBanks(
Math.max(0, (int) Math.min((long) Integer.MAX_VALUE,
(long) InternalUtils.getIntProperty("net.algart.arrays.DefaultDataFileModel.numberOfBanksPerCPU", 3)
* (long) InternalUtils.availableProcessors())));
// Bank size for unresizable arrays: 4 MB on 32-bit JVM, 16 MB on 64-bit JVM (defaults).
private static final int DEFAULT_BANK_SIZE =
InternalUtils.JAVA_32 ?
MappedDataStorages.MappingSettings.nearestCorrectBankSize(InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.bankSize32", 4194304)) : // 4M: for 4 kernels, up to 48M/array
MappedDataStorages.MappingSettings.nearestCorrectBankSize(InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.bankSize", 16777216)); // 16M: for 12 kernels, up to 576M/array
// Bank size for resizable arrays: 2 MB on 32-bit JVM, 4 MB on 64-bit JVM (defaults).
private static final int DEFAULT_RESIZABLE_BANK_SIZE =
InternalUtils.JAVA_32 ?
MappedDataStorages.MappingSettings.nearestCorrectBankSize(InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.resizableBankSize32", 2097152)) : // 2M
MappedDataStorages.MappingSettings.nearestCorrectBankSize(InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.resizableBankSize", 4194304)); // 4M
// These values must be not too high: for resizable arrays, it is a granularity of growing large files
// Limit below which an unresizable file is mapped with a single map() call:
// 4 MB on 32-bit JVM, 256 MB on 64-bit JVM (defaults). Negative values are clamped to 0.
private static final int DEFAULT_SINGLE_MAPPING_LIMIT =
InternalUtils.JAVA_32 ?
Math.max(0, InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.singleMappingLimit32", 4194304)) : // 4M: up to 4M/array
Math.max(0, InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.singleMappingLimit", 268435456)); // 256M: up to 256M/array
private static final boolean DEFAULT_AUTO_RESIZING_ON_MAPPING = InternalUtils.getBooleanProperty(
"net.algart.arrays.DefaultDataFileModel.autoResizingOnMapping", false);
// Lazy writing defaults to true on Java 1.7+, false on older JVMs (see defaultLazyWriting()).
private static final boolean DEFAULT_LAZY_WRITING = InternalUtils.getBooleanProperty(
"net.algart.arrays.DefaultDataFileModel.lazyWriting", InternalUtils.JAVA_7);
// RandomAccessFile open mode for writable files; "rwd" also writes content updates synchronously.
private static final String DEFAULT_FILE_WRITE_MODE = InternalUtils.getStringProperty(
"net.algart.arrays.DefaultDataFileModel.fileWriteMode", "rwd");
// used also in StandardIODataFileModel
private static final long DEFAULT_PREFIX_SIZE = Math.max(0L,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.prefixSize", 0)); // for debugging only
// Retry delays/timeouts for the openWithSeveralAttempts / mapWithSeveralAttempts /
// forceWithSeveralAttempts workarounds (see those methods below).
private static final int OPEN_SLEEP_DELAY = 200; // ms
private static final int OPEN_TIMEOUT = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.openTimeout", 5000));
// 5 sec sleeping time
private static final int MAP_SLEEP_DELAY = 200; // ms
private static final int MAP_TIMEOUT = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.mapTimeout", 600));
// 0.6 sec sleeping time
private static final int MAP_TIMEOUT_WITH_GC = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.mapTimeoutWithGc", 400));
// and 0.4 sec sleeping time with gc
private static final int FORCE_SLEEP_DELAY = 250; // ms
private static final int FORCE_TIMEOUT = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.forceTimeout", 15000));
// 15 sec sleeping time (40 attempts)
private static final int MEMORY_UTILIZATION_FORCE_TIMEOUT = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.memoryUtilizationForceTimeout", 1000));
// 1 sec sleeping time
private static final int WRITE_THROUGH_FORCE_TIMEOUT = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.writeThroughForceTimeout", 500)); // 0.5 sec sleeping time
private static final int NEXT_RELOAD_MIN_DELAY = Math.max(0,
InternalUtils.getIntProperty(
"net.algart.arrays.DefaultDataFileModel.nextReloadMinDelay", 2000)); // 2 sec
// Total mapped memory allowed before forced flushing; capped at 2^56 to avoid overflow.
private static final long MAX_MAPPED_MEMORY = Math.min(1L << 56, Math.max(0L,
InternalUtils.getLongPropertyWithImportant(
"net.algart.arrays.maxMappedMemory", 536870912L))); // 512 MB
static final boolean UNSAFE_UNMAP_ON_EXIT = false;
// InternalUtils.getBooleanProperty(
// "net.algart.arrays.DefaultDataFileModel.unsafeUnmapOnExit", false);
// false by default; DOES NOT WORK since Java 9!
static final boolean UNSAFE_UNMAP_ON_DISPOSE = false;
// InternalUtils.getBooleanProperty(
// "net.algart.arrays.DefaultDataFileModel.unsafeUnmapOnDispose", false);
// false by default; not used now; DOES NOT WORK since Java 9!
static final boolean UNSAFE_UNMAP_ON_EXCEEDING_MAX_MAPPED_MEMORY = InternalUtils.getBooleanProperty(
"net.algart.arrays.DefaultDataFileModel.unsafeUnmapOnExceedingMaxMappedMemory", false);
// false by default
static final boolean GC_ON_EXCEEDING_MAX_MAPPED_MEMORY = InternalUtils.getBooleanProperty(
"net.algart.arrays.DefaultDataFileModel.gcOnExceedingMaxMappedMemory",
!UNSAFE_UNMAP_ON_EXCEEDING_MAX_MAPPED_MEMORY); // !UNSAFE_UNMAP_ON_EXCEEDING_MAX_MAPPED_MEMORY by default
private static final boolean CACHE_MAPPINGS = true;
// This flag may be set to false for debug goals only!
// It must be true to provide correct full DataFile.force() method.
// If true, flush/unmap of mapped buffers does not force data to disk; see isLazyWriting().
private final boolean lazyWriting;
/**
 * Default {@link #isLazyWriting() lazy-writing mode}, used when this class
* is instantiated by a constructor without lazyWriting
argument.
* More precisely, if there is the system property
* "net.algart.arrays.DefaultDataFileModel.lazyWriting
",
* containing "true
" or "false
" string (in lower case),
* this method returns true
if this property contains "true
"
* and false
if this property contains "false
".
* If there is no such property, or if it contains illegal string
* (not "true
" or "false
"),
* or if some exception occurred while calling System.getProperty
,
* this method returns true
in Java 1.7 or higher Java version
* and false
in Java 1.5 and Java 1.6.
* The value of this system property is loaded and checked only once
* while initializing {@link DefaultDataFileModel} class.
*
* @return default {@link #isLazyWriting() lazy-writing mode}
*/
public static boolean defaultLazyWriting() {
    // The value was read once from the "net.algart.arrays.DefaultDataFileModel.lazyWriting"
    // system property during class initialization; default is true on Java 1.7+, false before.
    return DefaultDataFileModel.DEFAULT_LAZY_WRITING;
}
/**
* The maximal amount of RAM (in bytes), allowed for simultaneous mapping by FileChannel.map
method
* without flushing data to the disk by MappedByteBuffer.force()
method.
*
* This value is important while using {@link #isLazyWriting() lazy-writing mode}.
* In this case, a lot of mapping requests (calls of FileChannel.map
),
* with modifications of the mapped data and without further MappedByteBuffer.force()
,
* will use RAM for storing the changed data in the system cache.
* When all (or almost all) available RAM will be spent, it may lead to intensive disk swapping.
* The reason is that the mapped memory is not controlled by Java garbage collector:
* it is possible to map much more disk memory than Runtime.maxMemory()
.
* The result may be extremely slowing down of all other applications, working on the computer,
* and even practical impossibility of any work: all RAM will be occupied by your application.
*
*
To avoid this behavior, this class controls the total amount of mapped memory
* (summary size of all mapped buffers in all files),
* and when it exceeds the limit, returned by this method,
* calls MappedByteBuffer.force()
for all currently mapped buffers
 * and, so, flushes the data to the disk and frees the system memory.
*
*
This value, returned by this method, is retrieved from the system property
* "net.algart.arrays.maxMappedMemory
",
* if it exists and contains a valid integer number.
* If this property contains zero or negative integer, this method returns 0, and
* it means that the amount of RAM for simultaneous mapping is not limited at all:
* the application will try to use all available system memory.
 * If this property contains an integer greater than the limit 2^56 (about 7.2*10^16),
* this limit is used instead: it guarantees that using this value will not lead to integer overflow.
* If there is no such property, or if it contains not a number,
* or if some exception occurred while calling Long.getLong
,
* this method returns the default value 536870912
(512 MB).
* The value of this system property is loaded and checked only once
* while initializing {@link DefaultDataFileModel} class.
*
*
We recommend to always set this system property in serious applications,
* working with large AlgART arrays.
* The suitable value is about 50-100% of the current RAM installed on the computer.
* The default value 512 MB works well if you don't need to process larger amounts of data frequently.
*
* @return the maximal amount of RAM (in bytes), allowed for simultaneous mapping by this class.
*/
public static long maxMappedMemory() {
    // Value of the "net.algart.arrays.maxMappedMemory" system property, read once during
    // class initialization; 0 means "unlimited", upper bound is 2^56 bytes.
    return DefaultDataFileModel.MAX_MAPPED_MEMORY;
}
/**
* Equivalent to new {@link #DefaultDataFileModel(File, long, boolean)
* DefaultDataFileModel}(null, 0, {@link #defaultLazyWriting()})
.
*/
public DefaultDataFileModel() {
// Delegates to the full constructor: default temp directory, default prefix size, default lazy writing.
// NOTE(review): the Javadoc above says prefix 0; DEFAULT_PREFIX_SIZE defaults to 0 but is
// configurable via the "net.algart.arrays.DefaultDataFileModel.prefixSize" system property.
this(null, DEFAULT_PREFIX_SIZE, defaultLazyWriting());
}
/**
* Equivalent to new {@link #DefaultDataFileModel(File, long, boolean)
* DefaultDataFileModel}(null, 0, lazyWriting)
.
*
 * @param lazyWriting if true, lazy-writing mode will be used.
*/
public DefaultDataFileModel(boolean lazyWriting) {
// Delegates to the full constructor: default temp directory and default prefix size.
this(null, DEFAULT_PREFIX_SIZE, lazyWriting);
}
/**
* Equivalent to new {@link #DefaultDataFileModel(File, long, boolean)
* DefaultDataFileModel}(tempPath, 0, {@link #defaultLazyWriting()})
.
*
* @param tempPath the path where new temporary files will be created
* by {@link #createTemporaryFile(boolean)} method
* or {@code null} if the default temporary-file directory is to be used.
*/
public DefaultDataFileModel(File tempPath) {
// Delegates to the full constructor with default prefix size and default lazy-writing mode.
this(tempPath, DEFAULT_PREFIX_SIZE, defaultLazyWriting());
}
/**
* Equivalent to new {@link #DefaultDataFileModel(File, long, boolean)
* DefaultDataFileModel}(tempPath, 0, lazyWriting)
.
*
* @param tempPath the path where new temporary files will be created
* by {@link #createTemporaryFile(boolean)} method
* or {@code null} if the default temporary-file directory is to be used.
 * @param lazyWriting if true, lazy-writing mode will be used.
*/
public DefaultDataFileModel(File tempPath, boolean lazyWriting) {
// Delegates to the full constructor with default prefix size.
this(tempPath, DEFAULT_PREFIX_SIZE, lazyWriting);
}
/**
* Creates new instance with specified lazy-writing mode.
*
*
Please see {@link AbstractDataFileModel#AbstractDataFileModel(File, long)} about
* tempPath
and prefixSize
arguments.
*
*
The lazyWriting
argument specifies whether the data files
* will use lazy writing mode. Namely, if this flag is set, flushing or unmapping
* the mapped regions via {@link DataFile.BufferHolder#flush(boolean) DataFile.BufferHolder.flush(false)},
* {@link DataFile.BufferHolder#unmap(boolean) DataFile.BufferHolder.unmap(false)} calls
* will not lead to immediate writing data
* to the disk file: this method will not do anything.
* Instead, the data will be really written by garbage collector.
* If this flag is not set, any call of {@link DataFile.BufferHolder#flush(boolean)}
* or {@link DataFile.BufferHolder#unmap(boolean)} method
* forces writing data to the disk by
* force()
method of MappedByteBuffer
class.
*
*
By default, if you use constructors without lazyWriting
argument,
* this flag is retrieved from the system property
* "net.algart.arrays.DefaultDataFileModel.lazyWriting
"
* or, if there is no such property,
* is set to true
in Java 1.7+ or false
in Java 1.5 and 1.6.
* Please see {@link #defaultLazyWriting()}.
*
*
Usually, you should set lazyWriting
flag to true
.
* It can essentially increase the performance, if you create and modify many large AlgART arrays,
* because OS will store the new data in the cache and will not physically write data to the disk.
 * Even in this case, this class periodically flushes the unsaved data, when the summary
* amount of mapped buffers exceeds the limit returned by {@link #maxMappedMemory()} method.
*
*
The false
value of this flag may be recommended if the stable behavior of your application
* is more important than the speed. If lazy writing is disabled,
* the application will use less RAM and the risk of swapping will be much less,
* because each new data will be immediately saved to the disk and will not be cached in RAM.
*
*
Unfortunately, lazy-writing mode leads to internal Sun's bug in Java 1.5 and 1.6:
* we recommend never set it to true
in these Java versions.
* The detailed description of this bug is here:
* "(fc) "Cleaner terminated abnormally" error in simple mapping test".
*
* @param tempPath the path where new temporary files will be created
* by {@link #createTemporaryFile(boolean)} method
* or {@code null} if the default temporary-file directory is to be used.
* @param prefixSize the value returned by {@link #recommendedPrefixSize()} implementation in this class.
 * @param lazyWriting if true, lazy-writing mode will be used.
* @see #maxMappedMemory()
*/
public DefaultDataFileModel(File tempPath, long prefixSize, boolean lazyWriting) {
// tempPath and prefixSize are handled entirely by the superclass; see AbstractDataFileModel.
super(tempPath, prefixSize);
// Stored once; this class is immutable, so no synchronization is needed for this field.
this.lazyWriting = lazyWriting;
}
/**
* Returns the lazyWriting
argument, passed to
* {@link #DefaultDataFileModel(boolean) the constructor} while creating this instance.
*
* @return the lazyWriting
flag, passed to the constructor.
*/
public final boolean isLazyWriting() {
    // Simple accessor for the immutable flag passed to the constructor.
    return lazyWriting;
}
/**
* This implementation returns the data file corresponding to usual Java file new java.io.File(path)
* with {@link DataFile#map(net.algart.arrays.DataFile.Range, boolean) DataFile.map}
* method based on standard Java mapping.
*
*
This method never throws java.io.IOError
.
*
* @param path the path to disk file (as the argument of new java.io.File(path)
).
* @param byteOrder the byte order that will be always used for mapping this file.
* @return new instance of {@link DataFile} object.
* @throws NullPointerException if one of the passed arguments is {@code null}.
*/
public DataFile getDataFile(File path, ByteOrder byteOrder) {
    // Fail fast on null arguments, as promised by the Javadoc contract.
    Objects.requireNonNull(path, "Null path argument");
    Objects.requireNonNull(byteOrder, "Null byteOrder argument");
    // The returned file uses standard FileChannel.map-based mapping; byte order is fixed per file.
    return new MappableFile(path, byteOrder, this.lazyWriting);
}
/**
* Returns the absolute path to the disk file (java.io.File.getAbsoluteFile()
).
* The argument may be created by this data file model or by {@link StandardIODataFileModel}.
*
*
This method never throws java.io.IOError
.
*
* @param dataFile the data file.
* @return the absolute path to the disk file.
* @throws NullPointerException if the argument is {@code null}.
* @throws ClassCastException if the data file was created by data file model, other than
* {@link DefaultDataFileModel} or {@link StandardIODataFileModel}.
*/
public File getPath(DataFile dataFile) {
    // ClassCastException here is part of the documented contract for DataFile instances
    // created by foreign data file models; null argument fails with NullPointerException
    // on the field access below.
    final MappableFile mappableFile = (MappableFile) dataFile;
    return mappableFile.file;
}
/**
*
This implementation returns true
.
*
* @return true
.
*/
@Override
public boolean isAutoDeletionRequested() {
// Temporary files created by this model should be deleted automatically.
return true;
}
/**
*
This implementation returns the value
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.numberOfBanksPerCPU", 3)
* * {@link Arrays.SystemSettings#availableProcessors()}
,
* stored while initializing this {@link DefaultDataFileModel} class,
* or default value 3 * {@link Arrays.SystemSettings#availableProcessors()}
,
* if some exception occurred while calling Integer.getInteger
.
* If this value is less than 2, returns 2.
* If "net.algart.arrays.DefaultDataFileModel.numberOfBanksPerCPU" property contains negative or zero integer,
* returns 2.
*
*
Please note that many algorithms, on multiprocessor or multicore systems,
* use several parallel threads for processing arrays: see {@link Arrays.ParallelExecutor}.
* So, the number of banks should be enough for parallel using by all CPU units,
* to avoid frequently bank swapping.
* There should be at least 2 banks per each CPU unit,
* better 3-4 banks (for complex random-access algorithms).
* @return the recommended number of memory banks.
*/
@Override
public int recommendedNumberOfBanks() {
    // Computed once during class initialization:
    // numberOfBanksPerCPU (default 3) * availableProcessors(), corrected to a valid count.
    return DefaultDataFileModel.DEFAULT_NUMBER_OF_BANKS;
}
/**
*
This implementation returns the value
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.bankSize",16777216)
(16 MB)
* when the argument is true
and
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.resizableBankSize",4194304)
(4 MB)
* when the argument is false
on 64-bit Java machines.
* On 32-bit JVM, this method returns
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.bankSize32",4194304)
(4 MB)
* when the argument is true
and
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.resizableBankSize32",2097152)
(2 MB)
* when the argument is false
.
* These values are stored while initializing {@link DefaultDataFileModel} class.
* If some exceptions occur while calling Integer.getInteger
,
* the default values 16777216 / 4194304 (for 64-bit Java) or
* 4194304 / 2097152 (for 32-bit Java) are returned.
* If this property contains invalid value (for example, not a power of two),
* this value is automatically corrected to the nearest valid one.
*
*
This method distinguishes between 32-bit and 64-bit Java via {@link Arrays.SystemSettings#isJava32()} method.
* Please remember that the result of that method is not 100% robust;
* so, please not specify too high values if you are not quite sure that your JVM is not 32-bit
* and has no 32-bit limitations for the address space.
*
* @param unresizable true
if this bank size will be used for unresizable arrays only.
* @return the recommended size of every memory bank in bytes.
*/
@Override
public int recommendedBankSize(boolean unresizable) {
    // Unresizable arrays may use larger banks; resizable arrays use smaller banks,
    // because the bank size is also the granularity of growing large files.
    if (unresizable) {
        return DEFAULT_BANK_SIZE;
    } else {
        return DEFAULT_RESIZABLE_BANK_SIZE;
    }
}
/**
*
This implementation returns the value
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.singleMappingLimit",268435456)
(256 MB)
* on 64-bit Java machines. On 32-bit JVM, this method returns
* Integer.getInteger("net.algart.arrays.DefaultDataFileModel.singleMappingLimit32",4194304)
(4 MB).
* This value is stored while initializing {@link DefaultDataFileModel} class.
* If some exceptions occur while calling Integer.getInteger
,
* the default value 268435456 (or 4194304 for 32-bit Java) is returned.
*
*
This method distinguishes between 32-bit and 64-bit Java via {@link Arrays.SystemSettings#isJava32()} method.
* Please remember that the result of that method is not 100% robust;
* so, please not specify too high values if you are not quite sure that your JVM is not 32-bit
* and has no 32-bit limitations for the address space.
*
* @return the recommended limit for file size, in bytes, so that less files, if they are unresizable,
* should be mapped only once by single call of {@link DataFile#map} method.
*/
@Override
public int recommendedSingleMappingLimit() {
    // 256 MB by default on 64-bit JVM, 4 MB on 32-bit JVM; read once during class initialization.
    return DefaultDataFileModel.DEFAULT_SINGLE_MAPPING_LIMIT;
}
/**
*
This implementation returns the value
* Boolean.getBoolean("net.algart.arrays.DefaultDataFileModel.autoResizingOnMapping")
,
* stored while initializing {@link DefaultDataFileModel} class,
* or false
if there is no such system property or some exception occurred while
* calling Boolean.getBoolean
.
*
* @return true
if mapping outside the file length automatically increase the length.
*/
@Override
public boolean autoResizingOnMapping() {
    // false unless the "...autoResizingOnMapping" system property was set to "true".
    return DefaultDataFileModel.DEFAULT_AUTO_RESIZING_ON_MAPPING;
}
/**
* This implementation returns "mapmm"
;
*
* @return "mapmm"
.
*/
@Override
public String temporaryFilePrefix() {
// Prefix for temporary file names created by this model ("map" + memory model abbreviation).
return "mapmm";
}
/**
* Returns a brief string description of this class.
*
*
The result of this method may depend on implementation.
*
* @return a brief string description of this object.
*/
public String toString() {
    // Brief human-readable summary of the model configuration.
    final StringBuilder sb = new StringBuilder("default data file model: ");
    sb.append(recommendedNumberOfBanks()).append(" banks per ");
    sb.append(recommendedBankSize(true)).append("/").append(recommendedBankSize(false)).append(" bytes, ");
    final int singleMappingLimit = recommendedSingleMappingLimit();
    if (singleMappingLimit > 0) {
        sb.append("single mapping until ").append(singleMappingLimit).append(" bytes, ");
    }
    sb.append(lazyWriting ? "lazy-writing" : "write-through");
    return sb.toString();
}
// The reasons of this method are analogous to mapWithSeveralAttempts and forceWithSeveralAttempts:
// Windows NTFS sometimes cannot open file, because it is still "used" by another process,
// alike flushing mapped blocks from the cache
/**
 * Opens the file via {@code RandomAccessFile}, retrying every {@code OPEN_SLEEP_DELAY} ms
 * for up to {@code OPEN_TIMEOUT} ms if {@code FileNotFoundException} is thrown while the file
 * actually exists (a Windows/NTFS quirk: the file may still be "used by another process"
 * while the OS flushes previously mapped blocks).
 *
 * @param file     the disk file to open.
 * @param readOnly if {@code true}, opens in "r" mode; otherwise in {@code DEFAULT_FILE_WRITE_MODE}.
 * @return the opened file.
 * @throws FileNotFoundException if the file cannot be opened: immediately when {@code readOnly}
 *                               and the file does not exist, otherwise after the retry timeout
 *                               (the last caught exception is rethrown).
 */
private static RandomAccessFile openWithSeveralAttempts(File file, boolean readOnly) throws FileNotFoundException {
    long t1 = System.nanoTime();
    int numberOfAttempts = 0;
    FileNotFoundException exception = null;
    RandomAccessFile result = null;
    for (int timeoutInMillis = OPEN_TIMEOUT; ; timeoutInMillis -= OPEN_SLEEP_DELAY) {
        try {
            result = new RandomAccessFile(file, readOnly ? "r" : DEFAULT_FILE_WRITE_MODE);
            break;
        } catch (FileNotFoundException e) {
            if (readOnly && !file.exists()) {
                // The file really does not exist: retrying cannot help.
                throw e;
            }
            // Strange situation: maybe, the Windows error
            // "The process cannot access the file because it is being used by another process"
            exception = e;
        }
        numberOfAttempts++;
        if (timeoutInMillis <= 0) {
            break;
        }
        try {
            //noinspection BusyWait
            Thread.sleep(OPEN_SLEEP_DELAY);
            // System.out.println("Sleeping for " + file);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt(); // restore the interrupt status for the caller
            break; // return the last exception if interrupted
        }
    }
    long t2 = System.nanoTime();
    if (result == null) {
        assert exception != null;
        LargeMemoryModel.LOGGER.warning(String.format(Locale.US,
            "MMMM open: cannot open file in %.2f sec, "
                + numberOfAttempts + " attempts (%s; %s)",
            (t2 - t1) * 1e-9, file, exception));
        throw exception;
    }
    return result;
}
// The following method is useful to avoid a bug in 32-bit Java:
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6776490
/**
 * Maps the given region of the file channel, retrying every {@code MAP_SLEEP_DELAY} ms if
 * {@code FileChannel.map} throws {@code IOException} — a workaround for a 32-bit Java bug
 * (Sun bug 6776490). During the first {@code MAP_TIMEOUT} ms, attempts are plain retries;
 * for the remaining {@code MAP_TIMEOUT_WITH_GC} ms, each attempt is preceded by
 * {@code System.gc()}, which may release stale mappings and free address space.
 * The mapping itself is serialized through the global disk synchronizer, keyed by file name.
 *
 * @param fileName    the file name: the synchronization key and logging context.
 * @param fileChannel the channel to map.
 * @param mode        the mapping mode (read-only or read/write).
 * @param position    the starting position of the region within the file.
 * @param size        the size of the region in bytes.
 * @return the mapped byte buffer.
 * @throws IOException if all attempts failed (the last caught exception is rethrown).
 */
private static MappedByteBuffer mapWithSeveralAttempts(String fileName,
    final FileChannel fileChannel, final FileChannel.MapMode mode,
    final long position, final long size) throws IOException
{
    // We should not use System.currentTimeMillis() below, but need to count calls of Thread.sleep:
    // the system time, theoretically, can be changed during this loop.
    long t1 = System.nanoTime();
    int numberOfAttempts = 0, numberOfGc = 0;
    IOException exception = null;
    MappedByteBuffer result = null;
    for (int timeoutInMilliseconds = MAP_TIMEOUT + MAP_TIMEOUT_WITH_GC; ;
        timeoutInMilliseconds -= MAP_SLEEP_DELAY)
    {
        try {
            result = Arrays.SystemSettings.globalDiskSynchronizer().doSynchronously(fileName,
                new Callable<MappedByteBuffer>() { // typed Callable: no raw type / unchecked warning
                    public MappedByteBuffer call() throws IOException {
                        return fileChannel.map(mode, position, size);
                    }
                });
            break;
        } catch (Exception ex) {
            if (!(ex instanceof IOException)) {
                // The Callable can only throw IOException; anything else is a programming error.
                throw new AssertionError("Unexpected exception type: " + ex);
            }
            exception = (IOException) ex;
        }
        numberOfAttempts++;
        if (timeoutInMilliseconds <= 0) {
            break;
        }
        boolean doGc = timeoutInMilliseconds <= MAP_TIMEOUT_WITH_GC;
        LargeMemoryModel.LOGGER.config("MMMM map: problem with mapping data, new attempt #"
            + numberOfAttempts + (doGc ? " with gc" : ""));
        if (doGc) {
            // Garbage collection may finalize stale mappings and release address space.
            System.gc();
            numberOfGc++;
        }
        try {
            //noinspection BusyWait
            Thread.sleep(MAP_SLEEP_DELAY);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt(); // restore the interrupt status for the caller
            break; // return the last exception if interrupted
        }
    }
    long t2 = System.nanoTime();
    if (result == null) {
        assert exception != null;
        LargeMemoryModel.LOGGER.warning(String.format(Locale.US,
            "MMMM map: cannot map data in %.2f sec, "
                + numberOfAttempts + " attempts" + (numberOfGc > 0 ? ", " + numberOfGc + " with gc" : "")
                + " (%s; %s)",
            (t2 - t1) * 1e-9, fileName, exception));
        throw exception;
    }
    return result;
}
// The following method is useful to avoid a bug in Java 1.6:
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6539707
private static Error forceWithSeveralAttempts(String fileName,
final MappedByteBuffer mbb,
DataFile.Range range, // for logging only
boolean memoryUtilization,
int timeoutInMillis)
{
Error resultError; // will be not null in a case of some exception inside mbb.force() method
// We should not use System.currentTimeMillis() below, but need to count calls of Thread.sleep:
// the system time, theoretically, can be changed during this loop
int attemptCount = 0;
long t1 = System.currentTimeMillis();
for (; ; timeoutInMillis -= FORCE_SLEEP_DELAY) {
resultError = null;
try {
attemptCount++;
Arrays.SystemSettings.globalDiskSynchronizer().doSynchronously(fileName, new Callable