
package com.thinkaurelius.titan.diskstorage.cassandra.astyanax;
import com.google.common.base.Predicate;
import com.google.common.collect.*;
import com.netflix.astyanax.ExceptionCallback;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.*;
import com.netflix.astyanax.query.AllRowsQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.thinkaurelius.titan.diskstorage.*;
import com.thinkaurelius.titan.diskstorage.cassandra.utils.CassandraHelper;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.*;
import com.thinkaurelius.titan.diskstorage.util.RecordIterator;
import com.thinkaurelius.titan.diskstorage.util.StaticArrayBuffer;
import com.thinkaurelius.titan.diskstorage.util.StaticArrayEntry;
import javax.annotation.Nullable;
import java.nio.ByteBuffer;
import java.util.*;
import static com.thinkaurelius.titan.diskstorage.cassandra.AbstractCassandraStoreManager.Partitioner;
import static com.thinkaurelius.titan.diskstorage.cassandra.CassandraTransaction.getTx;
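/**
 * Astyanax-backed implementation of Titan's {@link KeyColumnValueStore}.
 * Each instance wraps a single Cassandra column family with ByteBuffer keys
 * and columns, translating Titan slice queries into Astyanax row slice
 * queries and routing mutations through the owning {@link AstyanaxStoreManager}.
 *
 * A minimal usage sketch (illustrative only; assumes an already-configured
 * AstyanaxStoreManager {@code manager} and a transaction {@code tx} obtained
 * from it — the store name and key/slice buffers are placeholders):
 * <pre>{@code
 * KeyColumnValueStore store = manager.openDatabase("edgestore");
 * EntryList slice = store.getSlice(new KeySliceQuery(key, sliceStart, sliceEnd), tx);
 * }</pre>
 */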
public class AstyanaxKeyColumnValueStore implements KeyColumnValueStore {
private final Keyspace keyspace;
private final String columnFamilyName;
private final ColumnFamily<ByteBuffer, ByteBuffer> columnFamily;
private final RetryPolicy retryPolicy;
private final AstyanaxStoreManager storeManager;
private final AstyanaxGetter entryGetter;
AstyanaxKeyColumnValueStore(String columnFamilyName,
Keyspace keyspace,
AstyanaxStoreManager storeManager,
RetryPolicy retryPolicy) {
this.keyspace = keyspace;
this.columnFamilyName = columnFamilyName;
this.retryPolicy = retryPolicy;
this.storeManager = storeManager;
entryGetter = new AstyanaxGetter(storeManager.getMetaDataSchema(columnFamilyName));
columnFamily = new ColumnFamily<ByteBuffer, ByteBuffer>(
this.columnFamilyName,
ByteBufferSerializer.get(),
ByteBufferSerializer.get());
}
ColumnFamily<ByteBuffer, ByteBuffer> getColumnFamily() {
return columnFamily;
}
@Override
public void close() throws BackendException {
//Do nothing
}
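// Single-key slice: delegate to getNamesSlice and unwrap the only entry
// list, falling back to an empty list when the row does not exist.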
@Override
public EntryList getSlice(KeySliceQuery query, StoreTransaction txh) throws BackendException {
Map<StaticBuffer, EntryList> result = getNamesSlice(query.getKey(), query, txh);
return Iterables.getOnlyElement(result.values(), EntryList.EMPTY_LIST);
}
@Override
public Map<StaticBuffer, EntryList> getSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
return getNamesSlice(keys, query, txh);
}
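// Convenience overload: wraps the key in a singleton list and delegates
// to the multi-key variant below.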
public Map<StaticBuffer, EntryList> getNamesSlice(StaticBuffer key,
SliceQuery query, StoreTransaction txh) throws BackendException {
return getNamesSlice(ImmutableList.of(key), query, txh);
}
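// Issues one multi-get slice query covering all keys. When a limit is set,
// one extra column is requested: the slice end is exclusive in Titan's
// model, so a column equal to the end may come back and is later dropped
// by CassandraHelper.makeEntryList.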
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys,
SliceQuery query, StoreTransaction txh) throws BackendException {
/*
* RowQuery<K, C> should be parameterized as
* RowQuery<ByteBuffer, ByteBuffer>. However, this causes the following
* compilation error when attempting to call withColumnRange on a
* RowQuery<ByteBuffer, ByteBuffer> instance:
*
* java.lang.Error: Unresolved compilation problem: The method
* withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
* for the type RowQuery<ByteBuffer, ByteBuffer>
*
* The compiler substitutes ByteBuffer=C for both startColumn and
* endColumn, compares it to its identical twin with that type
* hard-coded, and dies.
*
*/
RowSliceQuery rq = keyspace.prepareQuery(columnFamily)
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
.withRetryPolicy(retryPolicy.duplicate())
.getKeySlice(CassandraHelper.convert(keys));
// Thank you, Astyanax, for making builder pattern useful :(
rq.withColumnRange(query.getSliceStart().asByteBuffer(),
query.getSliceEnd().asByteBuffer(),
false,
query.getLimit() + (query.hasLimit()?1:0)); //Add one for potentially removed last column
OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
try {
r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
} catch (ConnectionException e) {
throw new TemporaryBackendException(e);
}
Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
Map<StaticBuffer, EntryList> result = new HashMap<StaticBuffer, EntryList>(rows.size());
for (Row<ByteBuffer, ByteBuffer> row : rows) {
assert !result.containsKey(row.getKey());
result.put(StaticArrayBuffer.of(row.getKey()),
CassandraHelper.makeEntryList(row.getColumns(), entryGetter, query.getSliceEnd(), query.getLimit()));
}
return result;
}
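// Adapter that exposes Astyanax Column objects to Titan's StaticArrayEntry
// machinery: column name, value, and the TIMESTAMP/TTL metadata declared
// in the column family's meta data schema.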
private static class AstyanaxGetter implements StaticArrayEntry.GetColVal<Column<ByteBuffer>, ByteBuffer> {
private final EntryMetaData[] schema;
private AstyanaxGetter(EntryMetaData[] schema) {
this.schema = schema;
}
@Override
public ByteBuffer getColumn(Column<ByteBuffer> element) {
return element.getName();
}
@Override
public ByteBuffer getValue(Column<ByteBuffer> element) {
return element.getByteBufferValue();
}
@Override
public EntryMetaData[] getMetaSchema(Column<ByteBuffer> element) {
return schema;
}
@Override
public Object getMetaData(Column<ByteBuffer> element, EntryMetaData meta) {
switch(meta) {
case TIMESTAMP:
return element.getTimestamp();
case TTL:
return element.getTtl();
default:
throw new UnsupportedOperationException("Unsupported meta data: " + meta);
}
}
}
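// Single-key mutation is expressed as a one-entry mutateMany batch so that
// all writes funnel through the store manager's batching logic.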
@Override
public void mutate(StaticBuffer key, List<Entry> additions, List<StaticBuffer> deletions, StoreTransaction txh) throws BackendException {
mutateMany(ImmutableMap.of(key, new KCVMutation(additions, deletions)), txh);
}
public void mutateMany(Map<StaticBuffer, KCVMutation> mutations, StoreTransaction txh) throws BackendException {
storeManager.mutateMany(ImmutableMap.of(columnFamilyName, mutations), txh);
}
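// Explicit locking is not supported at this layer; Titan wraps stores with
// an expected-value-checking locking layer when locks are needed.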
@Override
public void acquireLock(StaticBuffer key, StaticBuffer column, StaticBuffer expectedValue, StoreTransaction txh) throws BackendException {
throw new UnsupportedOperationException();
}
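// Full key scan, only valid under a random partitioner (md5/murmur3).
// Rows are paged in getPageSize() at a time on a single fetch thread, with
// a bounded number of retries on connection failures; empty rows
// ("range ghosts") are filtered out by RowIterator below.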
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws BackendException {
if (storeManager.getPartitioner() != Partitioner.RANDOM)
throw new PermanentBackendException("This operation is only allowed when random partitioner (md5 or murmur3) is used.");
AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows();
if (sliceQuery != null) {
allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(),
sliceQuery.getSliceEnd().asByteBuffer(),
false,
sliceQuery.getLimit());
}
Rows<ByteBuffer, ByteBuffer> result;
try {
/* Note: we need to fetch columns for each row as well to remove "range ghosts" */
OperationResult op = allRowsQuery.setRowLimit(storeManager.getPageSize()) // pre-fetch that many rows at a time
.setConcurrencyLevel(1) // one execution thread for fetching portion of rows
.setExceptionCallback(new ExceptionCallback() {
private int retries = 0;
@Override
public boolean onException(ConnectionException e) {
try {
return retries > 2; // make 3 re-tries
} finally {
retries++;
}
}
}).execute();
result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
} catch (ConnectionException e) {
throw new PermanentBackendException(e);
}
return new RowIterator(result.iterator(), sliceQuery);
}
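// Key-range scan, only valid under the byte-ordering partitioner; the
// exclusive end key is filtered out of the result via KeySkipPredicate.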
@Override
public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException {
// This query can only be done when the byte-ordering partitioner is used,
// because Cassandra operates on tokens internally: even a contiguous
// range of keys (e.g. a time slice) could, under a random partitioner,
// map to a disjoint set of tokens, returning ambiguous results to the user.
Partitioner partitioner = storeManager.getPartitioner();
if (partitioner != Partitioner.BYTEORDER)
throw new PermanentBackendException("getKeys(KeyRangeQuery) can only be used with a byte-ordering partitioner.");
ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer();
RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily)
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
.withRetryPolicy(retryPolicy.duplicate())
.getKeyRange(start, end, null, null, Integer.MAX_VALUE);
// Astyanax is bad at builder pattern :(
rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(),
query.getSliceEnd().asByteBuffer(),
false,
query.getLimit());
// Omit the query's key end from the result, if present
final Rows<ByteBuffer, ByteBuffer> r;
try {
r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult();
} catch (ConnectionException e) {
throw new TemporaryBackendException(e);
}
Iterator<Row<ByteBuffer, ByteBuffer>> i =
Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer()));
return new RowIterator(i, query);
}
@Override
public String getName() {
return columnFamilyName;
}
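// Skips rows with no columns. Such rows are "range ghosts": deleted rows
// whose tombstones still surface in range scans.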
private static class KeyIterationPredicate implements Predicate<Row<ByteBuffer, ByteBuffer>> {
@Override
public boolean apply(@Nullable Row<ByteBuffer, ByteBuffer> row) {
return (row != null) && row.getColumns().size() > 0;
}
}
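// Drops the row matching the given key; used to make the key-range end
// exclusive in getKeys(KeyRangeQuery).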
private static class KeySkipPredicate implements Predicate<Row<ByteBuffer, ByteBuffer>> {
private final ByteBuffer skip;
public KeySkipPredicate(ByteBuffer skip) {
this.skip = skip;
}
@Override
public boolean apply(@Nullable Row<ByteBuffer, ByteBuffer> row) {
return (row != null) && !row.getKey().equals(skip);
}
}
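// Adapts an Astyanax row iterator to Titan's KeyIterator: next() advances
// to the next non-empty row and returns its key, while getEntries()
// iterates the current row's columns as Entry objects.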
private class RowIterator implements KeyIterator {
private final Iterator<Row<ByteBuffer, ByteBuffer>> rows;
private Row<ByteBuffer, ByteBuffer> currentRow;
private final SliceQuery sliceQuery;
private boolean isClosed;
public RowIterator(Iterator<Row<ByteBuffer, ByteBuffer>> rowIter, SliceQuery sliceQuery) {
this.rows = Iterators.filter(rowIter, new KeyIterationPredicate());
this.sliceQuery = sliceQuery;
}
@Override
public RecordIterator<Entry> getEntries() {
ensureOpen();
if (sliceQuery == null)
throw new IllegalStateException("getEntries() requires SliceQuery to be set.");
return new RecordIterator<Entry>() {
private final Iterator<Entry> columns =
CassandraHelper.makeEntryIterator(currentRow.getColumns(),
entryGetter,
sliceQuery.getSliceEnd(), sliceQuery.getLimit());
@Override
public boolean hasNext() {
ensureOpen();
return columns.hasNext();
}
@Override
public Entry next() {
ensureOpen();
return columns.next();
}
@Override
public void close() {
isClosed = true;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
@Override
public boolean hasNext() {
ensureOpen();
return rows.hasNext();
}
@Override
public StaticBuffer next() {
ensureOpen();
currentRow = rows.next();
return StaticArrayBuffer.of(currentRow.getKey());
}
@Override
public void close() {
isClosed = true;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
private void ensureOpen() {
if (isClosed)
throw new IllegalStateException("Iterator has been closed.");
}
}
}