swim.db.Chunk Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of swim-db Show documentation
Lock-free document store—optimized for high rate atomic state changes—that concurrently commits and compacts on-disk log-structured storage files without blocking parallel in-memory updates to associative B-tree maps, spatial Q-tree maps, sequential S-tree lists, and singleton U-tree values
The newest version!
// Copyright 2015-2024 Nstream, inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package swim.db;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import swim.codec.Binary;
import swim.codec.Output;
import swim.codec.OutputBuffer;
import swim.codec.Utf8;
import swim.collections.FingerTrieSeq;
/**
 * An immutable description of one committed unit of database storage: the
 * set of trees and serialized pages written to a single zone of the store,
 * along with the {@link Germ} that seeds recovery of this commit.
 *
 * <p>All fields are final and assigned once in the constructor, so instances
 * are safe to share between threads after construction.
 */
public class Chunk {

  final Database database;
  final Commit commit;
  final int post;
  final int zone;
  final Germ germ;
  final long size;
  // NOTE(review): the generic element types below were evidently stripped by
  // the page scrape — the typed iteration in soften() requires
  // FingerTrieSeq<Tree>, and write() requires FingerTrieSeq<Page>.
  final FingerTrieSeq<Tree> trees;
  final FingerTrieSeq<Page> pages;

  /**
   * Constructs a chunk describing a commit of {@code trees}, whose serialized
   * form is the sequence of {@code pages} totaling {@code size} bytes,
   * destined for storage zone {@code zone}.
   *
   * @param database the database this chunk belongs to
   * @param commit the commit operation that produced this chunk
   * @param post post identifier for this chunk — presumably the earliest
   *        still-referenced zone; TODO confirm against Database
   * @param zone the storage zone the chunk's pages are written to
   * @param germ the recovery germ recording this commit's version
   * @param size total size in bytes of all serialized pages
   * @param trees the trees captured by this commit
   * @param pages the serialized pages to persist, in write order
   */
  public Chunk(Database database, Commit commit, int post, int zone, Germ germ,
               long size, FingerTrieSeq<Tree> trees, FingerTrieSeq<Page> pages) {
    this.database = database;
    this.commit = commit;
    this.post = post;
    this.zone = zone;
    this.germ = germ;
    this.size = size;
    this.trees = trees;
    this.pages = pages;
  }

  /** Returns the database this chunk belongs to. */
  public final Database database() {
    return this.database;
  }

  /** Returns the commit operation that produced this chunk. */
  public final Commit commit() {
    return this.commit;
  }

  /** Returns this chunk's post identifier. */
  public final int post() {
    return this.post;
  }

  /** Returns the storage zone this chunk's pages are written to. */
  public final int zone() {
    return this.zone;
  }

  /** Returns the recovery germ recording this commit's version. */
  public final Germ germ() {
    return this.germ;
  }

  /** Returns the total size in bytes of all serialized pages. */
  public final long size() {
    return this.size;
  }

  /** Returns the trees captured by this commit. */
  public final FingerTrieSeq<Tree> trees() {
    return this.trees;
  }

  /** Returns the serialized pages to persist, in write order. */
  public final FingerTrieSeq<Page> pages() {
    return this.pages;
  }

  /**
   * Softens every tree in this chunk at the germ's version, allowing
   * in-memory page references from this commit to be reclaimed.
   */
  public void soften() {
    final long version = this.germ.version();
    for (Tree tree : this.trees) {
      tree.soften(version);
    }
  }

  /**
   * Serializes every page of this chunk, in order, to {@code channel}.
   *
   * <p>Each page is encoded into a buffer of exactly {@link Page#pageSize()}
   * bytes; a mismatch between the serialized length and the declared page
   * size indicates a corrupt or inconsistent page and aborts the write.
   *
   * @throws StoreException if a page's serialized size disagrees with its
   *         declared page size, or if an I/O error occurs
   */
  public void write(WritableByteChannel channel) {
    try {
      final FingerTrieSeq<Page> pages = this.pages;
      for (int i = 0; i < pages.size(); i += 1) {
        final Page page = pages.get(i);
        final int pageSize = page.pageSize();
        final OutputBuffer<ByteBuffer> output = Binary.outputBuffer(new byte[pageSize]);
        final Output<?> encoder = Utf8.encodedOutput(output);
        page.writePage(encoder);
        final ByteBuffer pageBuffer = output.bind();
        if (pageBuffer.remaining() != pageSize) {
          throw new StoreException("serialized page size of " + pageBuffer.remaining() + " bytes "
              + "does not match expected page size of " + pageSize + " bytes");
        }
        // WritableByteChannel.write may perform partial writes; loop until
        // the buffer is fully drained. (The original trailing
        // hasRemaining() check was unreachable after this loop and has
        // been removed.)
        do {
          channel.write(pageBuffer);
        } while (pageBuffer.hasRemaining());
      }
    } catch (IOException cause) {
      throw new StoreException(cause);
    }
  }

}