/*
* Copyright (c) 2010-2017, KolibriFX AS. Licensed under the Apache License, version 2.0.
*/
package com.kolibrifx.plovercrest.server.internal.engine;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.AbstractList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.RandomAccess;
import org.apache.log4j.Logger;
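/**
* On-disk index ("fan-out table") that maps timestamps to file positions, stored as a flat
* file of fixed-size 16-byte entries. Entries are also kept in memory so they can be
* binary-searched by timestamp while new entries are appended.
*
* Illustrative usage sketch (names are examples, not actual call sites):
*
* <pre>
* FanOutTable table = new FanOutTable(new File("stream.fanout"));
* table.readFromFile();                           // load existing entries, drop a torn tail
* table.appendAndWrite(filePosition, timestamp);  // index a new file position
* int index = table.seekTakePrevious(timestamp);  // last entry at or before timestamp, or -1
* FanOutTable.Entry entry = table.entryAt(index); // null when index == -1
* </pre>
*/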
class FanOutTable {
private static final int ENTRY_SIZE = 16; // size in bytes
private static final int READ_SIZE = ENTRY_SIZE * 256; // chunk size for bulk reads in readFromFile()
private static final Logger log = Logger.getLogger(FanOutTable.class);
static class Entry {
long filePosition;
long timestamp;
@Override
public String toString() {
return "{FanOutTable.Entry offset=" + filePosition + " timestamp=" + timestamp + "}";
}
}
/**
* This class provides thread-safe random access to a growing list of entries, as long as
* only one thread appends to it at a time (single writer). Other operations, such as
* iteration, are not guaranteed to be thread-safe.
*
* This allows thread-safe binary search even while entries are being appended to the list.
*/
static class SingleWriterGrowingArrayList<E> extends AbstractList<E> implements RandomAccess {
private volatile int size;
private volatile E[] data;
@SuppressWarnings("unchecked")
SingleWriterGrowingArrayList() {
size = 0;
data = (E[]) new Object[1024]; // ugly, but works
}
private void ensureCapacity(final int capacity) {
if (capacity <= data.length) {
return;
}
final int newCapacity = Math.max(data.length * 2, capacity);
data = Arrays.copyOf(data, newCapacity);
}
// Note: this is safe to call during binary search
@Override
public boolean add(final E e) {
ensureCapacity(size + 1);
data[size] = e;
size++; // important that this happens AFTER assignment
return true;
}
// Note: this is NOT safe to call during binary search
@Override
public E remove(final int index) {
if (index < 0 || index >= size) {
throw new IndexOutOfBoundsException();
}
if (index != size - 1) {
throw new UnsupportedOperationException("Can only remove element at last index");
}
size--;
final E old = data[size];
data[size] = null;
return old;
}
@Override
public int size() {
return size;
}
@Override
public E get(final int index) {
if (index >= size) {
throw new IndexOutOfBoundsException();
}
return data[index];
}
}
private static Comparator<Entry> entryComparator = new Comparator<Entry>() {
@Override
public int compare(final Entry e1, final Entry e2) {
return Long.compare(e1.timestamp, e2.timestamp);
}
};
private SingleWriterGrowingArrayList<Entry> entries;
private File filename;
private RandomAccessFile fileHandle;
private FileChannel file;
private final ByteBuffer writeBuffer;
private final ThreadLocal<Entry> searchEntry;
FanOutTable(final File filename) {
this.filename = filename;
entries = new SingleWriterGrowingArrayList<Entry>();
writeBuffer = ByteBuffer.allocate(ENTRY_SIZE);
searchEntry = new ThreadLocal<Entry>() {
@Override
protected Entry initialValue() {
return new Entry();
}
};
}
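// Serializes a single entry as two big-endian longs (filePosition, then timestamp) and
// appends it at the current channel position.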
private void writeEntry(final Entry e) throws IOException {
assert file != null;
try {
assert file.position() == file.size();
} catch (final Exception exception) {
log.error(exception);
log.info("Tried to access " + filename);
}
writeBuffer.clear();
writeBuffer.putLong(e.filePosition);
writeBuffer.putLong(e.timestamp);
writeBuffer.flip();
file.write(writeBuffer);
}
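// Loads all complete entries from the index file into memory, truncates any trailing
// partial entry (for example, from an interrupted write), and closes the file again.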
void readFromFile() throws IOException {
fileHandle = new RandomAccessFile(filename, "rw");
file = fileHandle.getChannel();
file.position(0);
final SingleWriterGrowingArrayList<Entry> tmpEntries = new SingleWriterGrowingArrayList<Entry>();
final ByteBuffer buffer = ByteBuffer.allocate(READ_SIZE);
while (file.read(buffer) >= ENTRY_SIZE) {
buffer.flip();
while (buffer.remaining() >= ENTRY_SIZE) {
final Entry entry = new Entry();
entry.filePosition = buffer.getLong();
entry.timestamp = buffer.getLong();
tmpEntries.add(entry);
}
if (buffer.remaining() > 0) {
break;
}
buffer.clear();
}
entries = tmpEntries;
truncateFileIfNeeded();
close();
}
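// Lazily opens the index file for appending. If the on-disk size does not match the
// in-memory entry count, the file is rewritten from the in-memory entries; otherwise the
// channel position is moved to the end.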
void ensureFileReadyForWriting() throws IOException {
if (file != null) {
return;
}
fileHandle = new RandomAccessFile(filename, "rw");
file = fileHandle.getChannel();
if (file.size() != ((long) entries.size() * ENTRY_SIZE)) {
log.warn("Incorrect file size, truncating file and writing " + entries.size() + " entries");
file.truncate(0);
for (final Entry e : entries) {
writeEntry(e);
}
} else {
// set channel position to end
file.position(file.size());
}
}
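// Read-only view of the in-memory entries, backed by the live list.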
List<Entry> getEntries() {
return Collections.unmodifiableList(entries);
}
Entry lastEntry() {
if (entries.size() == 0) {
return null;
} else {
return entries.get(entries.size() - 1);
}
}
int lastIndex() {
return entries.size() - 1;
}
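/**
* Returns the index of the last entry whose timestamp is less than or equal to the given
* timestamp, or -1 if all entries are later (or the table is empty).
*/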
int seekTakePrevious(final long timestamp) {
// Note: this code is thread-safe, partly by accident. :)
//
// The search entry is thread-local, which is obviously thread-safe (and avoids creating
// garbage on each seek call, which can happen very frequently).
//
// Binary search over a growing list is thread-safe as long as existing entries do not
// change. The only requirement is that entries.size() and entries.get(index) give
// predictable results even if entries are appended from another thread. If entries are
// added during a binary search, the search simply continues in the range [0, oldSize-1].
//
// Since ArrayList does not provide the above guarantees, a custom list class
// (SingleWriterGrowingArrayList) is used instead.
final Entry se = searchEntry.get();
se.timestamp = timestamp;
final int index = Collections.binarySearch(entries, se, entryComparator);
if (index >= 0) {
// exact hit
return index;
}
final int insertionIndex = -index - 1;
if (insertionIndex == 0) {
// before the first element, return -1 as a special value
return -1;
}
return insertionIndex - 1;
}
Entry entryAt(final int index) {
if (index < 0) {
assert index == -1; // as returned from seekTakePrevious()
return null;
}
return entries.get(index);
}
int indexOf(final Entry entry) {
return entries.indexOf(entry);
}
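// Truncates the backing file if it is larger than the in-memory entries warrant; no-op
// when the file is not currently open.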
private void truncateFileIfNeeded() throws IOException {
if (file == null) {
return;
}
final long newsize = (long) entries.size() * ENTRY_SIZE;
if (newsize < file.size()) {
file.truncate(newsize);
// The documentation indicates that truncating should also update the position,
// but this doesn't appear to work, so set the position explicitly:
file.position(newsize);
}
}
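// Removes the most recent entry from memory and, if the file is open, truncates the file to match.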
void removeLastEntry() throws IOException {
assert entries.size() > 0;
entries.remove(entries.size() - 1);
truncateFileIfNeeded();
}
long lastTimestamp() {
if (lastEntry() == null) {
return -1;
} else {
return lastEntry().timestamp;
}
}
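// Appends a new entry in memory only; use appendAndWrite() to also persist it to the file.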
Entry append(final long filePosition, final long timestamp) {
final Entry entry = new Entry();
entry.filePosition = filePosition;
entry.timestamp = timestamp;
entries.add(entry);
return entry;
}
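// Appends a new entry and persists it, opening the index file if needed and closing it afterwards.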
void appendAndWrite(final long filePosition, final long timestamp) throws IOException {
ensureFileReadyForWriting();
final Entry entry = append(filePosition, timestamp);
writeEntry(entry);
close();
}
void close() throws IOException {
if (file != null) {
file.close();
file = null;
}
if (fileHandle != null) {
fileHandle.close();
fileHandle = null;
}
}
}