/*
* Copyright 2011-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.glowroot.local.store;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.List;
import java.util.Locale;
import java.util.Map.Entry;
import javax.annotation.Nullable;
import org.glowroot.shaded.google.common.base.Strings;
import org.glowroot.shaded.google.common.collect.ImmutableList;
import org.glowroot.shaded.google.common.collect.ImmutableSetMultimap;
import org.glowroot.shaded.google.common.collect.Lists;
import org.glowroot.shaded.google.common.io.CharSource;
import org.checkerframework.checker.tainting.qual.Untainted;
import org.glowroot.shaded.slf4j.Logger;
import org.glowroot.shaded.slf4j.LoggerFactory;
import org.glowroot.collector.Trace;
import org.glowroot.collector.TraceRepository;
import org.glowroot.common.ChunkSource;
import org.glowroot.local.store.DataSource.BatchAdder;
import org.glowroot.local.store.DataSource.ResultSetExtractor;
import org.glowroot.local.store.DataSource.RowMapper;
import org.glowroot.markers.OnlyUsedByTests;
import static org.glowroot.shaded.google.common.base.Preconditions.checkNotNull;
import static org.glowroot.common.Checkers.castUntainted;
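/**
 * Data access object for traces. Trace rows are stored in the embedded H2 database via
 * {@link DataSource}, while the potentially large entry and profile JSON is written to a
 * separate size-capped database and referenced from the trace row by capped id.
 *
 * <p>Typical read-side usage (a sketch; both methods may return null):
 *
 * <pre>
 * Trace trace = traceDao.readTrace(traceId);
 * CharSource entries = traceDao.readEntries(traceId);
 * </pre>
 */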
public class TraceDao implements TraceRepository {
private static final Logger logger = LoggerFactory.getLogger(TraceDao.class);
private static final ImmutableList<Column> traceColumns = ImmutableList.of(
Column.of("id", Types.VARCHAR).withPrimaryKey(true),
Column.of("partial", Types.BIGINT),
Column.of("slow", Types.BOOLEAN),
Column.of("error", Types.BOOLEAN),
Column.of("start_time", Types.BIGINT),
Column.of("capture_time", Types.BIGINT),
Column.of("duration", Types.BIGINT), // nanoseconds
Column.of("transaction_type", Types.VARCHAR),
Column.of("transaction_name", Types.VARCHAR),
Column.of("headline", Types.VARCHAR),
Column.of("user", Types.VARCHAR),
Column.of("custom_attributes", Types.VARCHAR), // json data
Column.of("custom_detail", Types.VARCHAR), // json data
Column.of("error_message", Types.VARCHAR),
Column.of("error_throwable", Types.VARCHAR), // json data
Column.of("timers", Types.VARCHAR), // json data
Column.of("thread_cpu_time", Types.BIGINT), // nanoseconds
Column.of("thread_blocked_time", Types.BIGINT), // nanoseconds
Column.of("thread_waited_time", Types.BIGINT), // nanoseconds
Column.of("thread_allocated_bytes", Types.BIGINT),
Column.of("gc_infos", Types.VARCHAR), // json data
Column.of("entry_count", Types.BIGINT),
Column.of("entries_capped_id", Types.VARCHAR), // capped database id
Column.of("profile_sample_count", Types.BIGINT),
// profile json is always from "synthetic root"
Column.of("profile_capped_id", Types.VARCHAR)); // capped database id
// capture_time column is used for expiring records without using FK with on delete cascade
private static final ImmutableList<Column> transactionCustomAttributeColumns = ImmutableList.of(
Column.of("trace_id", Types.VARCHAR),
Column.of("name", Types.VARCHAR),
Column.of("value", Types.VARCHAR),
Column.of("capture_time", Types.BIGINT));
private static final ImmutableList<Index> traceIndexes = ImmutableList.of(
// duration, id and error columns are included so h2 can return the result set directly
// from the index without having to reference the table for each row
//
// trace_slow_idx is for slow trace point query and for readOverallSlowCount()
Index.of("trace_slow_idx", ImmutableList.of("transaction_type", "slow", "capture_time",
"duration", "error", "id")),
// trace_transaction_slow_idx is for slow trace point query and for readTransactionSlowCount()
Index.of("trace_transaction_slow_idx", ImmutableList.of("transaction_type",
"transaction_name", "slow", "capture_time", "duration", "error", "id")),
// trace_error_idx is for error trace point query and for readOverallErrorCount()
Index.of("trace_error_idx", ImmutableList.of("transaction_type", "error",
"capture_time", "duration", "error", "id")),
// trace_transaction_error_idx is for error trace point query and for readTransactionErrorCount()
Index.of("trace_transaction_error_idx", ImmutableList.of("transaction_type",
"transaction_name", "error", "capture_time", "duration", "id")));
private final DataSource dataSource;
private final CappedDatabase traceCappedDatabase;
TraceDao(DataSource dataSource, CappedDatabase traceCappedDatabase) throws SQLException {
this.dataSource = dataSource;
this.traceCappedDatabase = traceCappedDatabase;
upgradeTraceTable(dataSource);
dataSource.syncTable("trace", traceColumns);
dataSource.syncIndexes("trace", traceIndexes);
dataSource.syncTable("trace_custom_attribute", transactionCustomAttributeColumns);
}
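// writes any entries and profile to the capped database first so that only their capped
// ids are stored in the trace row; uses h2's "merge" (insert-or-update on the primary
// key) so a partial trace stored while still in progress is later replaced by the
// completed trace with the same id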
@Override
public void store(final Trace trace, boolean slow, @Nullable ChunkSource entries,
@Nullable ChunkSource profile) throws Exception {
Long entriesId = null;
if (entries != null) {
entriesId = traceCappedDatabase.write(entries, TraceCappedDatabaseStats.TRACE_ENTRIES);
}
Long profileId = null;
if (profile != null) {
profileId = traceCappedDatabase.write(profile, TraceCappedDatabaseStats.TRACE_PROFILES);
}
dataSource.update("merge into trace (id, partial, slow, error, start_time, capture_time,"
+ " duration, transaction_type, transaction_name, headline, user,"
+ " custom_attributes, custom_detail, error_message, error_throwable, timers,"
+ " thread_cpu_time, thread_blocked_time, thread_waited_time,"
+ " thread_allocated_bytes, gc_infos, entry_count, entries_capped_id,"
+ " profile_sample_count, profile_capped_id) values"
+ " (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
trace.id(), trace.partial(), slow, trace.error(), trace.startTime(),
trace.captureTime(), trace.duration(), trace.transactionType(),
trace.transactionName(), trace.headline(), trace.user(), trace.customAttributes(),
trace.customDetail(), trace.errorMessage(), trace.errorThrowable(), trace.timers(),
trace.threadCpuTime(), trace.threadBlockedTime(), trace.threadWaitedTime(),
trace.threadAllocatedBytes(), trace.gcInfos(), trace.entryCount(), entriesId,
trace.profileSampleCount(), profileId);
final ImmutableSetMultimap<String, String> customAttributesForIndexing =
trace.customAttributesForIndexing();
if (!customAttributesForIndexing.isEmpty()) {
dataSource.batchUpdate("insert into trace_custom_attribute (trace_id, name,"
+ " value, capture_time) values (?, ?, ?, ?)", new BatchAdder() {
@Override
public void addBatches(PreparedStatement preparedStatement)
throws SQLException {
for (Entry<String, String> entry : customAttributesForIndexing.entries()) {
preparedStatement.setString(1, trace.id());
preparedStatement.setString(2, entry.getKey());
preparedStatement.setString(3, entry.getValue());
preparedStatement.setLong(4, trace.captureTime());
preparedStatement.addBatch();
}
}
});
}
}
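// runs the parameterized sql built by the query and maps each row to a trace point;
// one extra row over the limit is fetched so QueryResult can flag a truncated result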
public QueryResult<TracePoint> readPoints(TracePointQuery query) throws SQLException {
ParameterizedSql parameterizedSql = query.getParameterizedSql();
ImmutableList<TracePoint> points = dataSource.query(parameterizedSql.sql(),
new TracePointRowMapper(), parameterizedSql.argsAsArray());
// one extra record over the limit is fetched above to identify if the limit was hit
return QueryResult.from(points, query.limit());
}
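// the four count methods below are covered by the indexes defined in traceIndexes above,
// so h2 can answer them from the index alone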
public long readOverallSlowCount(String transactionType, long captureTimeFrom,
long captureTimeTo) throws SQLException {
return dataSource.queryForLong("select count(*) from trace where transaction_type = ?"
+ " and capture_time > ? and capture_time <= ? and slow = ?", transactionType,
captureTimeFrom, captureTimeTo, true);
}
public long readTransactionSlowCount(String transactionType, String transactionName,
long captureTimeFrom, long captureTimeTo) throws SQLException {
return dataSource.queryForLong("select count(*) from trace where transaction_type = ?"
+ " and transaction_name = ? and capture_time > ? and capture_time <= ?"
+ " and slow = ?", transactionType, transactionName, captureTimeFrom, captureTimeTo,
true);
}
public long readOverallErrorCount(String transactionType, long captureTimeFrom,
long captureTimeTo) throws SQLException {
return dataSource.queryForLong("select count(*) from trace where transaction_type = ?"
+ " and capture_time > ? and capture_time <= ? and error = ?", transactionType,
captureTimeFrom, captureTimeTo, true);
}
public long readTransactionErrorCount(String transactionType, String transactionName,
long captureTimeFrom, long captureTimeTo) throws SQLException {
return dataSource.queryForLong("select count(*) from trace where transaction_type = ?"
+ " and transaction_name = ? and capture_time > ? and capture_time <= ? and"
+ " error = ?", transactionType, transactionName, captureTimeFrom, captureTimeTo,
true);
}
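// aggregates error counts into fixed-width buckets of resolutionMillis, rounding each
// capture time up to its bucket boundary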
public ImmutableList<ErrorPoint> readErrorPoints(ErrorMessageQuery query,
long resolutionMillis, long liveCaptureTime) throws SQLException {
// need ".0" to force double result
String captureTimeSql = castUntainted(
"ceil(capture_time / " + resolutionMillis + ".0) * " + resolutionMillis);
ParameterizedSql parameterizedSql = buildErrorMessageQuery(query,
"select " + captureTimeSql + ", count(*) from trace",
"group by " + captureTimeSql + " order by " + captureTimeSql);
return dataSource.query(parameterizedSql.sql(), new ErrorPointRowMapper(liveCaptureTime),
parameterizedSql.argsAsArray());
}
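// returns the error messages matching the query with their counts, most frequent first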
public QueryResult<ErrorMessageCount> readErrorMessageCounts(ErrorMessageQuery query)
throws SQLException {
ParameterizedSql parameterizedSql = buildErrorMessageQuery(query,
"select error_message, count(*) from trace",
"group by error_message order by count(*) desc");
ImmutableList<ErrorMessageCount> points = dataSource.query(parameterizedSql.sql(),
new ErrorMessageCountRowMapper(), parameterizedSql.argsAsArray());
// one extra record over the limit is fetched above to identify if the limit was hit
return QueryResult.from(points, query.limit());
}
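// returns the trace with the given id, or null if none exists; id is the primary key, so
// multiple matches indicate corruption and are logged as an error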
public @Nullable Trace readTrace(String traceId) throws SQLException {
List<Trace> traces = dataSource.query("select id, partial, error, start_time,"
+ " capture_time, duration, transaction_type, transaction_name, headline, user,"
+ " custom_attributes, custom_detail, error_message, error_throwable,"
+ " timers, thread_cpu_time, thread_blocked_time, thread_waited_time,"
+ " thread_allocated_bytes, gc_infos, entry_count, entries_capped_id,"
+ " profile_sample_count, profile_capped_id from trace where id = ?",
new TraceRowMapper(), traceId);
if (traces.isEmpty()) {
return null;
}
if (traces.size() > 1) {
logger.error("multiple records returned for trace id: {}", traceId);
}
return traces.get(0);
}
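// entry and profile JSON is not stored in the trace row; it is read back from the capped
// database via the capped id in the row, and may be null if the trace has none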
public @Nullable CharSource readEntries(String traceId) throws SQLException {
return readFromCappedDatabase("entries_capped_id", traceId);
}
public @Nullable CharSource readProfile(String traceId) throws SQLException {
return readFromCappedDatabase("profile_capped_id", traceId);
}
public void deleteAll() throws SQLException {
dataSource.execute("truncate table trace_custom_attribute");
dataSource.execute("truncate table trace");
}
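// expires old records by capture_time; trace_custom_attribute carries its own
// capture_time column for this purpose (see note above the column definitions)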
void deleteBefore(long captureTime) throws SQLException {
dataSource.deleteBefore("trace_custom_attribute", captureTime);
dataSource.deleteBefore("trace", captureTime);
}
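// columnName is concatenated into the sql, hence the @Untainted requirement; callers
// pass compile-time constants only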
private @Nullable CharSource readFromCappedDatabase(@Untainted String columnName,
String traceId) throws SQLException {
return dataSource.query("select " + columnName + " from trace where id = ?",
new CappedIdResultExtractor(), traceId);
}
@OnlyUsedByTests
public long count() throws SQLException {
return dataSource.queryForLong("select count(*) from trace");
}
private ParameterizedSql buildErrorMessageQuery(ErrorMessageQuery query,
@Untainted String selectClause, @Untainted String groupByClause) {
String sql = selectClause;
List