/*
* Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.h2.compress.CompressDeflate;
import org.h2.compress.CompressLZF;
import org.h2.compress.Compressor;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.mvstore.tx.TransactionStore;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.StringDataType;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FileUtils;
import org.h2.util.Utils;
/**
* Utility methods used in combination with the MVStore.
*/
public class MVStoreTool {
/**
* Runs this tool.
* Options are case sensitive. Supported options are:
* <table summary="command line options">
* <tr><td>[-dump &lt;fileName&gt;]</td>
* <td>Dump the contents of the file</td></tr>
* <tr><td>[-info &lt;fileName&gt;]</td>
* <td>Get summary information about a file</td></tr>
* <tr><td>[-compact &lt;fileName&gt;]</td>
* <td>Compact a store</td></tr>
* <tr><td>[-compress &lt;fileName&gt;]</td>
* <td>Compact a store with compression enabled</td></tr>
* <tr><td>[-rollback &lt;fileName&gt; &lt;version&gt;]</td>
* <td>Roll back the store to the given version</td></tr>
* <tr><td>[-repair &lt;fileName&gt;]</td>
* <td>Repair a corrupted store</td></tr>
* </table>
*
* @param args the command line arguments
*/
public static void main(String... args) {
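// Example invocation (the jar name "h2.jar" is only an illustrative assumption):
//     java -cp h2.jar org.h2.mvstore.MVStoreTool -dump test.mv.db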
for (int i = 0; i < args.length; i++) {
if ("-dump".equals(args[i])) {
String fileName = args[++i];
dump(fileName, new PrintWriter(System.out), true);
} else if ("-info".equals(args[i])) {
String fileName = args[++i];
info(fileName, new PrintWriter(System.out));
} else if ("-compact".equals(args[i])) {
String fileName = args[++i];
compact(fileName, false);
} else if ("-compress".equals(args[i])) {
String fileName = args[++i];
compact(fileName, true);
} else if ("-rollback".equals(args[i])) {
String fileName = args[++i];
long targetVersion = Long.decode(args[++i]);
rollback(fileName, targetVersion, new PrintWriter(System.out));
} else if ("-repair".equals(args[i])) {
String fileName = args[++i];
repair(fileName);
}
}
}
/**
* Read the contents of the file and write them to system out.
*
* @param fileName the name of the file
* @param details whether to print details
*/
public static void dump(String fileName, boolean details) {
dump(fileName, new PrintWriter(System.out), details);
}
/**
* Read the summary information of the file and write it to system out.
*
* @param fileName the name of the file
*/
public static void info(String fileName) {
info(fileName, new PrintWriter(System.out));
}
/**
* Read the contents of the file and display them in a human-readable
* format.
*
* @param fileName the name of the file
* @param writer the print writer
* @param details whether to print the page details
*/
public static void dump(String fileName, Writer writer, boolean details) {
PrintWriter pw = new PrintWriter(writer, true);
if (!FilePath.get(fileName).exists()) {
pw.println("File not found: " + fileName);
return;
}
long size = FileUtils.size(fileName);
pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024);
FileChannel file = null;
int blockSize = MVStore.BLOCK_SIZE;
TreeMap<Integer, Long> mapSizesTotal = new TreeMap<>();
long pageSizeTotal = 0;
try {
file = FilePath.get(fileName).open("r");
long fileSize = file.size();
int len = Long.toHexString(fileSize).length();
ByteBuffer block = ByteBuffer.allocate(4096);
long pageCount = 0;
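// Scan the file block by block: a block starting with 'H' is a store file
// header, one starting with 'c' is a chunk header; anything else is skipped.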
for (long pos = 0; pos < fileSize;) {
block.rewind();
// Bugfix - An IllegalStateException that wraps an EOFException is
// thrown when a partial write has happened, for example after a
// power failure or a file system issue.
// In that case, skip the broken block at the end of the DB file.
try {
DataUtils.readFully(file, pos, block);
} catch (IllegalStateException e){
pos += blockSize;
pw.printf("ERROR illegal position %d%n", pos);
continue;
}
block.rewind();
int headerType = block.get();
if (headerType == 'H') {
String header = new String(block.array(), StandardCharsets.ISO_8859_1).trim();
pw.printf("%0" + len + "x fileHeader %s%n",
pos, header);
pos += blockSize;
continue;
}
if (headerType != 'c') {
pos += blockSize;
continue;
}
block.position(0);
Chunk c = null;
try {
c = Chunk.readChunkHeader(block, pos);
} catch (IllegalStateException e) {
pos += blockSize;
continue;
}
if (c.len <= 0) {
// not a chunk
pos += blockSize;
continue;
}
int length = c.len * MVStore.BLOCK_SIZE;
pw.printf("%n%0" + len + "x chunkHeader %s%n",
pos, c.toString());
ByteBuffer chunk = ByteBuffer.allocate(length);
DataUtils.readFully(file, pos, chunk);
int p = block.position();
pos += length;
int remaining = c.pageCount;
pageCount += c.pageCount;
TreeMap<Integer, Integer> mapSizes = new TreeMap<>();
int pageSizeSum = 0;
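// Walk all pages of this chunk. Each page starts with its length in bytes,
// a check value (ignored here), the map id, the entry count, and a type
// byte whose flags encode leaf vs. node and whether the page is compressed.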
while (remaining > 0) {
int start = p;
try {
chunk.position(p);
} catch (IllegalArgumentException e) {
// too far
pw.printf("ERROR illegal position %d%n", p);
break;
}
int pageSize = chunk.getInt();
// check value (ignored)
chunk.getShort();
int mapId = DataUtils.readVarInt(chunk);
int entries = DataUtils.readVarInt(chunk);
int type = chunk.get();
boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
boolean node = (type & 1) != 0;
if (details) {
pw.printf(
"+%0" + len +
"x %s, map %x, %d entries, %d bytes, maxLen %x%n",
p,
(node ? "node" : "leaf") +
(compressed ? " compressed" : ""),
mapId,
node ? entries + 1 : entries,
pageSize,
DataUtils.getPageMaxLength(DataUtils.getPagePos(0, 0, pageSize, 0))
);
}
p += pageSize;
Integer mapSize = mapSizes.get(mapId);
if (mapSize == null) {
mapSize = 0;
}
mapSizes.put(mapId, mapSize + pageSize);
Long total = mapSizesTotal.get(mapId);
if (total == null) {
total = 0L;
}
mapSizesTotal.put(mapId, total + pageSize);
pageSizeSum += pageSize;
pageSizeTotal += pageSize;
remaining--;
long[] children = null;
long[] counts = null;
if (node) {
children = new long[entries + 1];
for (int i = 0; i <= entries; i++) {
children[i] = chunk.getLong();
}
counts = new long[entries + 1];
for (int i = 0; i <= entries; i++) {
long s = DataUtils.readVarLong(chunk);
counts[i] = s;
}
}
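// Keys and values are decoded only for the meta map (map id 0), whose
// entries are plain strings; the page is decompressed first if needed.
// For other maps, only the child page references of internal nodes are
// printed below.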
String[] keys = new String[entries];
if (mapId == 0 && details) {
ByteBuffer data;
if (compressed) {
boolean fast = (type & DataUtils.PAGE_COMPRESSED_HIGH) != DataUtils.PAGE_COMPRESSED_HIGH;
Compressor compressor = getCompressor(fast);
int lenAdd = DataUtils.readVarInt(chunk);
int compLen = pageSize + start - chunk.position();
byte[] comp = Utils.newBytes(compLen);
chunk.get(comp);
int l = compLen + lenAdd;
data = ByteBuffer.allocate(l);
compressor.expand(comp, 0, compLen, data.array(), 0, l);
} else {
data = chunk;
}
for (int i = 0; i < entries; i++) {
String k = StringDataType.INSTANCE.read(data);
keys[i] = k;
}
if (node) {
// meta map node
for (int i = 0; i < entries; i++) {
long cp = children[i];
pw.printf(" %d children < %s @ " +
"chunk %x +%0" +
len + "x%n",
counts[i],
keys[i],
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
}
long cp = children[entries];
pw.printf(" %d children >= %s @ chunk %x +%0" +
len + "x%n",
counts[entries],
keys.length >= entries ? null : keys[entries],
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
} else {
// meta map leaf
String[] values = new String[entries];
for (int i = 0; i < entries; i++) {
String v = StringDataType.INSTANCE.read(data);
values[i] = v;
}
for (int i = 0; i < entries; i++) {
pw.println(" " + keys[i] +
" = " + values[i]);
}
}
} else {
if (node && details) {
for (int i = 0; i <= entries; i++) {
long cp = children[i];
pw.printf(" %d children @ chunk %x +%0" +
len + "x%n",
counts[i],
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
}
}
}
}
pageSizeSum = Math.max(1, pageSizeSum);
for (Integer mapId : mapSizes.keySet()) {
int percent = 100 * mapSizes.get(mapId) / pageSizeSum;
pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizes.get(mapId), percent);
}
int footerPos = chunk.limit() - Chunk.FOOTER_LENGTH;
try {
chunk.position(footerPos);
pw.printf(
"+%0" + len + "x chunkFooter %s%n",
footerPos,
new String(chunk.array(), chunk.position(),
Chunk.FOOTER_LENGTH, StandardCharsets.ISO_8859_1).trim());
} catch (IllegalArgumentException e) {
// too far
pw.printf("ERROR illegal footer position %d%n", footerPos);
}
}
pw.printf("%n%0" + len + "x eof%n", fileSize);
pw.printf("\n");
pageCount = Math.max(1, pageCount);
pw.printf("page size total: %d bytes, page count: %d, average page size: %d bytes\n",
pageSizeTotal, pageCount, pageSizeTotal / pageCount);
pageSizeTotal = Math.max(1, pageSizeTotal);
for (Integer mapId : mapSizesTotal.keySet()) {
int percent = (int) (100 * mapSizesTotal.get(mapId) / pageSizeTotal);
pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizesTotal.get(mapId), percent);
}
} catch (IOException e) {
pw.println("ERROR: " + e);
e.printStackTrace(pw);
} finally {
if (file != null) {
try {
file.close();
} catch (IOException e) {
// ignore
}
}
}
pw.flush();
}
private static Compressor getCompressor(boolean fast) {
return fast ? new CompressLZF() : new CompressDeflate();
}
/**
* Read the summary information of the file and write it to system out.
*
* @param fileName the name of the file
* @param writer the print writer
* @return null if successful (if there was no error), otherwise the error
* message
*/
public static String info(String fileName, Writer writer) {
PrintWriter pw = new PrintWriter(writer, true);
if (!FilePath.get(fileName).exists()) {
pw.println("File not found: " + fileName);
return "File not found: " + fileName;
}
long fileLength = FileUtils.size(fileName);
try (MVStore store = new MVStore.Builder().
fileName(fileName).recoveryMode().
readOnly().open()) {
MVMap<String, String> meta = store.getMetaMap();
Map<String, Object> header = store.getStoreHeader();
long fileCreated = DataUtils.readHexLong(header, "created", 0L);
TreeMap<Integer, Chunk> chunks = new TreeMap<>();
long chunkLength = 0;
long maxLength = 0;
long maxLengthLive = 0;
long maxLengthNotEmpty = 0;
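// Sum up the chunk metadata ("chunk.<id>" entries in the meta map) to
// compute the total chunk length and the fill rates reported below.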
for (Entry<String, String> e : meta.entrySet()) {
String k = e.getKey();
if (k.startsWith(DataUtils.META_CHUNK)) {
Chunk c = Chunk.fromString(e.getValue());
chunks.put(c.id, c);
chunkLength += c.len * MVStore.BLOCK_SIZE;
maxLength += c.maxLen;
maxLengthLive += c.maxLenLive;
if (c.maxLenLive > 0) {
maxLengthNotEmpty += c.maxLen;
}
}
}
pw.printf("Created: %s\n", formatTimestamp(fileCreated, fileCreated));
pw.printf("Last modified: %s\n",
formatTimestamp(FileUtils.lastModified(fileName), fileCreated));
pw.printf("File length: %d\n", fileLength);
pw.printf("The last chunk is not listed\n");
pw.printf("Chunk length: %d\n", chunkLength);
pw.printf("Chunk count: %d\n", chunks.size());
pw.printf("Used space: %d%%\n", getPercent(chunkLength, fileLength));
pw.printf("Chunk fill rate: %d%%\n", maxLength == 0 ? 100 :
getPercent(maxLengthLive, maxLength));
pw.printf("Chunk fill rate excluding empty chunks: %d%%\n",
maxLengthNotEmpty == 0 ? 100 :
getPercent(maxLengthLive, maxLengthNotEmpty));
for (Entry<Integer, Chunk> e : chunks.entrySet()) {
Chunk c = e.getValue();
long created = fileCreated + c.time;
pw.printf(" Chunk %d: %s, %d%% used, %d blocks",
c.id, formatTimestamp(created, fileCreated),
getPercent(c.maxLenLive, c.maxLen),
c.len
);
if (c.maxLenLive == 0) {
pw.printf(", unused: %s",
formatTimestamp(fileCreated + c.unused, fileCreated));
}
pw.printf("\n");
}
pw.printf("\n");
} catch (Exception e) {
pw.println("ERROR: " + e);
e.printStackTrace(pw);
return e.getMessage();
}
pw.flush();
return null;
}
private static String formatTimestamp(long t, long start) {
String x = new Timestamp(t).toString();
String s = x.substring(0, 19);
s += " (+" + ((t - start) / 1000) + " s)";
return s;
}
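// Maps partially used values into the range 1..99, so that only exactly 0
// reports 0% and only value == max reports 100%. For example,
// getPercent(1, 1000) returns 1 and getPercent(999, 1000) returns 98.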
private static int getPercent(long value, long max) {
if (value == 0) {
return 0;
} else if (value == max) {
return 100;
}
return (int) (1 + 98 * value / Math.max(1, max));
}
/**
* Compact the store by creating a new file and copying the live pages
* there. Temporarily, a file with the suffix ".tempFile" is created. This
* file is then renamed, replacing the original file, if possible. If not,
* the new file is renamed to ".newFile", then the old file is removed, and
* the new file is renamed. This process might be interrupted, so it is
* better to call compactCleanUp() before opening a store, in case this
* method was used.
*
* @param fileName the file name
* @param compress whether to compress the data
*/
public static void compact(String fileName, boolean compress) {
String tempName = fileName + Constants.SUFFIX_MV_STORE_TEMP_FILE;
FileUtils.delete(tempName);
compact(fileName, tempName, compress);
try {
FileUtils.moveAtomicReplace(tempName, fileName);
} catch (DbException e) {
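// The atomic rename failed (for example, not supported by the file system):
// fall back to renaming the temporary file to ".newFile", removing the
// original file, and then renaming the new file into place.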
String newName = fileName + Constants.SUFFIX_MV_STORE_NEW_FILE;
FileUtils.delete(newName);
FileUtils.move(tempName, newName);
FileUtils.delete(fileName);
FileUtils.move(newName, fileName);
}
}
/**
* Clean up if needed, in case a compact operation was interrupted because
* the process was killed or due to a power failure. This will delete temporary files
* (if any), and in case atomic file replacements were not used, rename the
* new file.
*
* @param fileName the file name
*/
public static void compactCleanUp(String fileName) {
String tempName = fileName + Constants.SUFFIX_MV_STORE_TEMP_FILE;
if (FileUtils.exists(tempName)) {
FileUtils.delete(tempName);
}
String newName = fileName + Constants.SUFFIX_MV_STORE_NEW_FILE;
if (FileUtils.exists(newName)) {
if (FileUtils.exists(fileName)) {
FileUtils.delete(newName);
} else {
FileUtils.move(newName, fileName);
}
}
}
/**
* Copy all live pages from the source store to the target store.
*
* @param sourceFileName the name of the source store
* @param targetFileName the name of the target store
* @param compress whether to compress the data
*/
public static void compact(String sourceFileName, String targetFileName, boolean compress) {
try (MVStore source = new MVStore.Builder().
fileName(sourceFileName).readOnly().open()) {
// Bugfix - Use nested try-with-resources blocks to close both the source and
// the target store, releasing their locks and file resources even if an OOM occurs.
// This fixes issues such as "Cannot delete file "/h2/data/test.mv.db.tempFile" [90025-197]"
// when a client connects to this server and reopens this store database in the same process.
// @since 2018-09-13 little-pan
FileUtils.delete(targetFileName);
MVStore.Builder b = new MVStore.Builder().
fileName(targetFileName);
if (compress) {
b.compress();
}
try (MVStore target = b.open()) {
compact(source, target);
}
}
}
/**
* Copy all live pages from the source store to the target store.
*
* @param source the source store
* @param target the target store
*/
public static void compact(MVStore source, MVStore target) {
int autoCommitDelay = target.getAutoCommitDelay();
boolean reuseSpace = target.getReuseSpace();
try {
target.setReuseSpace(false); // disable unused chunks collection
target.setAutoCommitDelay(0); // disable autocommit
MVMap<String, String> sourceMeta = source.getMetaMap();
MVMap<String, String> targetMeta = target.getMetaMap();
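// Copy only meta entries that do not describe the physical layout of the
// source file; chunk.*, map.*, name.* and root.* entries are skipped, as
// the target store re-creates them while the maps are copied below.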
for (Entry<String, String> m : sourceMeta.entrySet()) {
String key = m.getKey();
if (key.startsWith(DataUtils.META_CHUNK)) {
// ignore
} else if (key.startsWith(DataUtils.META_MAP)) {
// ignore
} else if (key.startsWith(DataUtils.META_NAME)) {
// ignore
} else if (key.startsWith(DataUtils.META_ROOT)) {
// ignore
} else {
targetMeta.put(key, m.getValue());
}
}
// We cheat a little bit in copyFrom() by employing "incomplete" pages,
// which are spared from being saved themselves, while the completed pages
// underneath are saved and may then appear as dead (non-reachable).
// That's why it is important to preserve all chunks
// created in the process, especially if the retention time
// is set to a low value, or even 0.
for (String mapName : source.getMapNames()) {
MVMap.Builder