/*
Copyright (C) SYSTAP, LLC DBA Blazegraph 2006-2016. All rights reserved.
Contact:
SYSTAP, LLC DBA Blazegraph
2501 Calvert ST NW #106
Washington, DC 20008
[email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Created on Mar 22, 2009
*/
package com.bigdata.counters.store;
import java.util.Arrays;
import java.util.Iterator;
import java.util.UUID;
import java.util.Vector;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.apache.log4j.Logger;
import com.bigdata.btree.BTree;
import com.bigdata.btree.Checkpoint;
import com.bigdata.btree.DefaultTupleSerializer;
import com.bigdata.btree.IIndex;
import com.bigdata.btree.IRangeQuery;
import com.bigdata.btree.ITuple;
import com.bigdata.btree.ITupleIterator;
import com.bigdata.btree.IndexMetadata;
import com.bigdata.btree.keys.ASCIIKeyBuilderFactory;
import com.bigdata.btree.keys.IKeyBuilder;
import com.bigdata.btree.keys.IKeyBuilderFactory;
import com.bigdata.btree.keys.KVO;
import com.bigdata.btree.keys.KeyBuilder;
import com.bigdata.counters.CounterSet;
import com.bigdata.counters.DefaultInstrumentFactory;
import com.bigdata.counters.History;
import com.bigdata.counters.HistoryInstrument;
import com.bigdata.counters.ICounter;
import com.bigdata.counters.ICounterNode;
import com.bigdata.counters.ICounterSet;
import com.bigdata.counters.IHistoryEntry;
import com.bigdata.counters.IInstrument;
import com.bigdata.counters.PeriodEnum;
import com.bigdata.counters.History.SampleIterator;
import com.bigdata.counters.ICounterSet.IInstrumentFactory;
import com.bigdata.io.SerializerUtil;
import com.bigdata.rawstore.IRawStore;
import com.bigdata.sparse.SparseRowStore;
import com.bigdata.util.Bytes;
/**
* An API for writing and querying counter sets. The data are written onto an
* {@link IIndex}. The {@link IIndex} may be local or remote.
*
* A multipart key is used. The first component is the milliseconds of the
* associated timestamp value rounded down to an even number of minutes and
* represented as a long. The second component is the fully qualified path of
* the counter. The last component is the exact timestamp (in milliseconds) of
* the sampled counter value, represented as a long. These components are
* formatted into an unsigned byte[] following the standard practice.
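*
* The following is a rough sketch of the key layout, for illustration only: it
* ignores the unsigned encoding of the long components that the real
* {@link IKeyBuilder} performs, and the counter path shown is hypothetical.
*
* <pre>
* // Illustration only - production keys are built via the ASCIIKeyBuilderFactory.
* final long timestamp = System.currentTimeMillis();
* final String path = "/myHost/cpu/user"; // hypothetical counter path
* // 1st component: the timestamp in ms, rounded down to an even # of minutes.
* final long minutes = (timestamp / 60000L) * 60000L;
* final byte[] pathBytes = path.getBytes(StandardCharsets.US_ASCII);
* final ByteBuffer buf = ByteBuffer.allocate(8 + pathBytes.length + 8);
* buf.putLong(minutes); // 1st component: minute bucket
* buf.put(pathBytes); // 2nd component: counter path
* buf.putLong(timestamp); // 3rd component: exact timestamp
* final byte[] key = buf.array();
* </pre>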
*
* The value stored under the key is the counter value. Normally counter values
* are doubles or longs, but you can store any of the counter value types which
* are supported by the {@link SparseRowStore}.
*
* Using this approach, writes of the same counter at different timestamps are
* recorded as distinct tuples in the {@link IIndex}, so counter values may be
* sampled as often as once per second while still retaining good key
* compression in the index.
*
* @author Bryan Thompson
* @version $Id$
*
* FIXME Reading through per-minute counters from a CounterSetBTree slows down
* very quickly.
*
* There are 21750988 counter values covering Fri Apr 03 15:51:57 EDT 2009 to
* Sat Apr 04 08:45:05 EDT 2009. It took 60 seconds to record each hour of data
* on the disk. 1.2G of XML data expanded to 2.6G on the journal.
*
* In order to improve performance, put the counter paths in a separate
* dictionary and apply the regex there. Once we have the set of matched paths
* we can scatter range queries against the BTree and drag back the data for
* those counters (this would also make Unicode counter names viable). If the
* key was then [pathId,timestamp] we could do ordered reads of just the
* necessary key range for each desired counter. Prefix compression would still
* be efficient for this representation. While the data arrive in history blocks,
* we would still need to buffer them for ordered writes since otherwise the
* writes would be scattered by the first key component (pathId).
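*
* A sketch of that proposed [pathId,timestamp] key is shown below; it is purely
* hypothetical, since this class does not implement the dictionary or that key.
*
* <pre>
* final long pathId = 42L; // hypothetical identifier assigned by the dictionary
* final long timestamp = System.currentTimeMillis();
* final ByteBuffer buf = ByteBuffer.allocate(8 + 8);
* buf.putLong(pathId);    // 1st component: dictionary-assigned path identifier
* buf.putLong(timestamp); // 2nd component: exact timestamp in milliseconds
* final byte[] proposedKey = buf.array();
* </pre>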
*
* I would have to encapsulate the counters as a relation for this to work, much
* like the RDF DB. There would be two relations: the dictionary and the
* timestamped values.
*
* Space-efficient encoding of the counter values would also help quite a bit.
* Right now it is Java default serialization, but we only store Long, Double,
* or String. All values for a given counter should have the same data type
* (that is required by how we allocate the History), so the data type can be
* part of the dictionary and used to decode the value. (If values tend to be
* close together, a delta encoding would help.)
*/
public class CounterSetBTree extends BTree {
protected static transient final Logger log = Logger
.getLogger(CounterSetBTree.class);
/**
* @param store
* The backing store.
* @param checkpoint
* The {@link Checkpoint} record.
* @param metadata
* The index metadata.
* @param readOnly
* When <code>true</code> the B+Tree will be read-only.
*/
public CounterSetBTree(IRawStore store, Checkpoint checkpoint,
IndexMetadata metadata, boolean readOnly) {
super(store, checkpoint, metadata, readOnly);
}
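/**
* The initial capacity of the key buffers created by the
* {@link ASCIIKeyBuilderFactory}.
*/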
static private final transient int INITIAL_CAPACITY = Bytes.kilobyte32;
/**
* Create a new instance.
*
* @param store
* The backing store.
*
* @return The new instance.
*/
static public CounterSetBTree create(final IRawStore store) {
final IndexMetadata metadata = new IndexMetadata(UUID.randomUUID());
metadata.setBTreeClassName(CounterSetBTree.class.getName());
metadata.setTupleSerializer(new CounterSetBTreeTupleSerializer(
new ASCIIKeyBuilderFactory(INITIAL_CAPACITY)));
return (CounterSetBTree) BTree.create(store, metadata);
}
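/**
* Create a new transient instance (the data will not be persisted). A minimal
* usage sketch:
*
* <pre>
* final CounterSetBTree btree = CounterSetBTree.createTransient();
* </pre>
*
* @return The new instance.
*/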
static public CounterSetBTree createTransient() {
final IndexMetadata metadata = new IndexMetadata(UUID.randomUUID());
metadata.setBTreeClassName(CounterSetBTree.class.getName());
metadata.setTupleSerializer(new CounterSetBTreeTupleSerializer(
new ASCIIKeyBuilderFactory(INITIAL_CAPACITY)));
return (CounterSetBTree) BTree.createTransient(metadata);
}
/**
* A representation of a timestamped performance counter value as stored in
* the {@link CounterSetBTree}. The path and timestamp fields are recovered
* from the key. The counter value is recovered from the tuple's value.
*
* @author Bryan Thompson
* @version $Id$
*/
static public class Entry {
// key
public final String path;
public final long timestamp;
// value
public final Object value;
public Entry(final long timestamp,
final String path, final Object value) {
this.timestamp = timestamp;
this.path = path;
this.value = value;
}
@Override
public String toString() {
return getClass().getName()+//
"{ path="+path+//
", value="+value+//
", timestamp="+timestamp+//
"}";
}
/**
* Return the depth of the path in the performance counter hierarchy
* (counts the #of '/' characters in the path).
*
* @return The depth.
*/
public int getDepth() {
int depth = 0;
final int len = path.length();
for (int i = 0; i < len; i++) {
if (path.charAt(i) == '/') {
depth++;
}
}
return depth;
}
}
/**
* Encapsulates key and value formation. The key is formed from the minutes,
* the path, and the timestamp. The value is the performance counter value
* for a specific timestamp.
*
* @author Bryan Thompson
* @version $Id$
*/
static protected class CounterSetBTreeTupleSerializer extends
DefaultTupleSerializer