// NOTE(review): the two lines below are non-code distribution/spam text that was prepended to
// this file; they are commented out because bare prose is a syntax error in Java.
// Many resources are needed to download a project. Please understand that we have to compensate our server costs. Thank you in advance. Project price only 1 $
// You can buy this project and download/modify it how often you want.
/**
* Copyright 2015-2018 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.cassandra;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.ContiguousSet;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.common.collect.Ordering;
import com.google.common.collect.Range;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import zipkin.Codec;
import zipkin.DependencyLink;
import zipkin.Span;
import zipkin.internal.CorrectForClockSkew;
import zipkin.internal.Dependencies;
import zipkin.internal.DependencyLinker;
import zipkin.internal.GroupByTraceId;
import zipkin.internal.MergeById;
import zipkin.internal.Nullable;
import zipkin.storage.QueryRequest;
import zipkin.storage.guava.GuavaSpanStore;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.DiscreteDomain.integers;
import static com.google.common.util.concurrent.Futures.allAsList;
import static com.google.common.util.concurrent.Futures.immediateFailedFuture;
import static com.google.common.util.concurrent.Futures.immediateFuture;
import static com.google.common.util.concurrent.Futures.transform;
import static zipkin.internal.Util.getDays;
public final class CassandraSpanStore implements GuavaSpanStore {
private static final Logger LOG = LoggerFactory.getLogger(CassandraSpanStore.class);
static final ListenableFuture> EMPTY_LIST =
immediateFuture(Collections.emptyList());
private final int maxTraceCols;
private final int indexFetchMultiplier;
private final boolean strictTraceId;
private final Session session;
private final TimestampCodec timestampCodec;
private final Set buckets;
private final PreparedStatement selectTraces;
private final PreparedStatement selectDependencies;
private final PreparedStatement selectServiceNames;
private final PreparedStatement selectSpanNames;
private final PreparedStatement selectTraceIdsByServiceName;
private final PreparedStatement selectTraceIdsByServiceNames;
private final PreparedStatement selectTraceIdsBySpanName;
private final PreparedStatement selectTraceIdsByAnnotation;
private final Function> traceIdToTimestamp;
CassandraSpanStore(Session session, int bucketCount, int maxTraceCols, int indexFetchMultiplier,
boolean strictTraceId) {
this.session = session;
this.maxTraceCols = maxTraceCols;
this.indexFetchMultiplier = indexFetchMultiplier;
this.strictTraceId = strictTraceId;
ProtocolVersion protocolVersion = session.getCluster()
.getConfiguration().getProtocolOptions().getProtocolVersion();
this.timestampCodec = new TimestampCodec(protocolVersion);
this.buckets = ContiguousSet.create(Range.closedOpen(0, bucketCount), integers());
selectTraces = session.prepare(
QueryBuilder.select("trace_id", "span")
.from("traces")
.where(QueryBuilder.in("trace_id", QueryBuilder.bindMarker("trace_id")))
.limit(QueryBuilder.bindMarker("limit_")));
selectDependencies = session.prepare(
QueryBuilder.select("dependencies")
.from("dependencies")
.where(QueryBuilder.in("day", QueryBuilder.bindMarker("days"))));
selectServiceNames = session.prepare(
QueryBuilder.select("service_name")
.from(Tables.SERVICE_NAMES));
selectSpanNames = session.prepare(
QueryBuilder.select("span_name")
.from(Tables.SPAN_NAMES)
.where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
.and(QueryBuilder.eq("bucket", QueryBuilder.bindMarker("bucket")))
.limit(QueryBuilder.bindMarker("limit_")));
selectTraceIdsByServiceName = session.prepare(
QueryBuilder.select("ts", "trace_id")
.from(Tables.SERVICE_NAME_INDEX)
.where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
.and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
.and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
.and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
.limit(QueryBuilder.bindMarker("limit_"))
.orderBy(QueryBuilder.desc("ts")));
selectTraceIdsBySpanName = session.prepare(
QueryBuilder.select("ts", "trace_id")
.from(Tables.SERVICE_SPAN_NAME_INDEX)
.where(
QueryBuilder.eq("service_span_name", QueryBuilder.bindMarker("service_span_name")))
.and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
.and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
.limit(QueryBuilder.bindMarker("limit_"))
.orderBy(QueryBuilder.desc("ts")));
selectTraceIdsByAnnotation = session.prepare(
QueryBuilder.select("ts", "trace_id")
.from(Tables.ANNOTATIONS_INDEX)
.where(QueryBuilder.eq("annotation", QueryBuilder.bindMarker("annotation")))
.and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
.and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
.and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
.limit(QueryBuilder.bindMarker("limit_"))
.orderBy(QueryBuilder.desc("ts")));
if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) {
LOG.warn("Please update Cassandra to 2.2 or later, as some features may fail");
// Log vs failing on "Partition KEY part service_name cannot be restricted by IN relation"
selectTraceIdsByServiceNames = null;
} else {
selectTraceIdsByServiceNames = session.prepare(
QueryBuilder.select("ts", "trace_id")
.from(Tables.SERVICE_NAME_INDEX)
.where(QueryBuilder.in("service_name", QueryBuilder.bindMarker("service_name")))
.and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
.and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
.and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
.limit(QueryBuilder.bindMarker("limit_"))
.orderBy(QueryBuilder.desc("ts")));
}
traceIdToTimestamp = new Function>() {
@Override public Map apply(ResultSet input) {
Map result = new LinkedHashMap<>();
for (Row row : input) {
result.put(row.getLong("trace_id"), timestampCodec.deserialize(row, "ts"));
}
return result;
}
};
}
/**
* This fans out into a potentially large amount of requests related to the amount of annotations
* queried. The returned future will fail if any of the inputs fail.
*
* <p>When {@link QueryRequest#serviceName service name} is unset, service names will be
* fetched eagerly, implying an additional query.
*/
@Override
public ListenableFuture>> getTraces(final QueryRequest request) {
checkArgument(request.minDuration == null,
"getTraces with duration is unsupported. Upgrade to the new cassandra3 schema.");
// Over fetch on indexes as they don't return distinct (trace id, timestamp) rows.
final int traceIndexFetchSize = request.limit * indexFetchMultiplier;
ListenableFuture