/**
* Copyright 2015-2016 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.cassandra;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.base.Function;
import com.google.common.collect.ContiguousSet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Ordering;
import com.google.common.collect.Range;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import zipkin.Codec;
import zipkin.DependencyLink;
import zipkin.Span;
import zipkin.internal.CorrectForClockSkew;
import zipkin.internal.Dependencies;
import zipkin.internal.MergeById;
import zipkin.internal.Nullable;
import zipkin.internal.Pair;
import zipkin.storage.QueryRequest;
import zipkin.storage.guava.GuavaSpanStore;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.DiscreteDomain.integers;
import static com.google.common.util.concurrent.Futures.allAsList;
import static com.google.common.util.concurrent.Futures.immediateFailedFuture;
import static com.google.common.util.concurrent.Futures.immediateFuture;
import static com.google.common.util.concurrent.Futures.transform;
import static zipkin.internal.Util.midnightUTC;
public final class CassandraSpanStore implements GuavaSpanStore {
private static final Logger LOG = LoggerFactory.getLogger(CassandraSpanStore.class);
static final ListenableFuture> EMPTY_LIST =
immediateFuture(Collections.emptyList());
static final Ordering> TRACE_DESCENDING = Ordering.from(new Comparator>() {
@Override
public int compare(List left, List right) {
return right.get(0).compareTo(left.get(0));
}
});
private final int indexTtl;
private final int maxTraceCols;
private final Session session;
private final TimestampCodec timestampCodec;
private final Set buckets;
private final PreparedStatement selectTraces;
private final PreparedStatement selectDependencies;
private final PreparedStatement selectServiceNames;
private final PreparedStatement selectSpanNames;
private final PreparedStatement selectTraceIdsByServiceName;
private final PreparedStatement selectTraceIdsByServiceNames;
private final PreparedStatement selectTraceIdsBySpanName;
private final PreparedStatement selectTraceIdsByAnnotation;
private final PreparedStatement selectTraceIdsBySpanDuration;
private final Function> traceIdToTimestamp;
/**
 * Prepares all statements used by this store against the given session.
 *
 * @param session live Cassandra session; also used to discover the protocol version
 * @param bucketCount number of index buckets: queries fan out over [0, bucketCount)
 * @param indexTtl TTL in seconds applied to index rows, bounds timestamp-range scans
 * @param maxTraceCols maximum columns fetched per trace query
 */
CassandraSpanStore(Session session, int bucketCount, int indexTtl, int maxTraceCols) {
  this.session = session;
  this.indexTtl = indexTtl;
  this.maxTraceCols = maxTraceCols;
  ProtocolVersion protocolVersion = session.getCluster()
      .getConfiguration().getProtocolOptions().getProtocolVersion();
  this.timestampCodec = new TimestampCodec(protocolVersion);
  this.buckets = ContiguousSet.create(Range.closedOpen(0, bucketCount), integers());

  selectTraces = session.prepare(
      QueryBuilder.select("trace_id", "span")
          .from("traces")
          .where(QueryBuilder.in("trace_id", QueryBuilder.bindMarker("trace_id")))
          .limit(QueryBuilder.bindMarker("limit_")));

  selectDependencies = session.prepare(
      QueryBuilder.select("dependencies")
          .from("dependencies")
          .where(QueryBuilder.in("day", QueryBuilder.bindMarker("days"))));

  selectServiceNames = session.prepare(
      QueryBuilder.select("service_name")
          .from("service_names"));

  selectSpanNames = session.prepare(
      QueryBuilder.select("span_name")
          .from("span_names")
          .where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
          .and(QueryBuilder.eq("bucket", QueryBuilder.bindMarker("bucket")))
          .limit(QueryBuilder.bindMarker("limit_")));

  selectTraceIdsByServiceName = session.prepare(
      QueryBuilder.select("ts", "trace_id")
          .from("service_name_index")
          .where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
          .and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
          .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
          .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
          .limit(QueryBuilder.bindMarker("limit_"))
          .orderBy(QueryBuilder.desc("ts")));

  selectTraceIdsBySpanName = session.prepare(
      QueryBuilder.select("ts", "trace_id")
          .from("service_span_name_index")
          .where(
              QueryBuilder.eq("service_span_name", QueryBuilder.bindMarker("service_span_name")))
          .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
          .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
          .limit(QueryBuilder.bindMarker("limit_"))
          .orderBy(QueryBuilder.desc("ts")));

  selectTraceIdsByAnnotation = session.prepare(
      QueryBuilder.select("ts", "trace_id")
          .from("annotations_index")
          .where(QueryBuilder.eq("annotation", QueryBuilder.bindMarker("annotation")))
          .and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
          .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
          .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
          .limit(QueryBuilder.bindMarker("limit_"))
          .orderBy(QueryBuilder.desc("ts")));

  selectTraceIdsBySpanDuration = session.prepare(
      QueryBuilder.select("duration", "ts", "trace_id")
          .from("span_duration_index")
          .where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
          .and(QueryBuilder.eq("span_name", QueryBuilder.bindMarker("span_name")))
          .and(QueryBuilder.eq("bucket", QueryBuilder.bindMarker("time_bucket")))
          .and(QueryBuilder.lte("duration", QueryBuilder.bindMarker("max_duration")))
          .and(QueryBuilder.gte("duration", QueryBuilder.bindMarker("min_duration")))
          .orderBy(QueryBuilder.desc("duration")));

  if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) {
    LOG.warn("Please update Cassandra to 2.2 or later, as some features may fail");
    // Log vs failing on "Partition KEY part service_name cannot be restricted by IN relation"
    selectTraceIdsByServiceNames = null;
  } else {
    selectTraceIdsByServiceNames = session.prepare(
        QueryBuilder.select("ts", "trace_id")
            .from("service_name_index")
            .where(QueryBuilder.in("service_name", QueryBuilder.bindMarker("service_name")))
            .and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
            .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
            .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
            .limit(QueryBuilder.bindMarker("limit_"))
            .orderBy(QueryBuilder.desc("ts")));
  }

  traceIdToTimestamp = new Function<ResultSet, Map<Long, Long>>() {
    @Override public Map<Long, Long> apply(ResultSet input) {
      // LinkedHashMap keeps the row order returned by the index query (ts descending)
      Map<Long, Long> traceIdsToTimestamps = new LinkedHashMap<>();
      for (Row row : input) {
        traceIdsToTimestamps.put(row.getLong("trace_id"), timestampCodec.deserialize(row, "ts"));
      }
      return traceIdsToTimestamps;
    }
  };
}
/**
* This fans out into a potentially large amount of requests, particularly if duration is set, but
* also related to the amount of annotations queried. The returned future will fail if any of the
* inputs fail.
*
* <p>When {@link QueryRequest#serviceName service name} is unset, service names will be
* fetched eagerly, implying an additional query.
*
* <p>The duration query is the most expensive query in cassandra, as it turns into 1 request per
* hour of {@link QueryRequest#lookback lookback}. Because many times lookback is set to a day,
* this means 24 requests to the backend!
*
* <p>See https://github.com/openzipkin/zipkin-java/issues/200
*/
@Override
public ListenableFuture>> getTraces(final QueryRequest request) {
ListenableFuture