// goodmetrics.downstream.OpentelemetryClient.kt
// From goodmetrics-kotlin: "A metrics recording library that is good."
package goodmetrics.downstream
import goodmetrics.Metrics
import goodmetrics.io.opentelemetry.proto.collector.metrics.v1.MetricsServiceGrpcKt
import goodmetrics.io.opentelemetry.proto.collector.metrics.v1.exportMetricsServiceRequest
import goodmetrics.io.opentelemetry.proto.common.v1.KeyValue
import goodmetrics.io.opentelemetry.proto.common.v1.anyValue
import goodmetrics.io.opentelemetry.proto.common.v1.instrumentationScope
import goodmetrics.io.opentelemetry.proto.common.v1.keyValue
import goodmetrics.io.opentelemetry.proto.metrics.v1.AggregationTemporality
import goodmetrics.io.opentelemetry.proto.metrics.v1.Metric
import goodmetrics.io.opentelemetry.proto.metrics.v1.ResourceMetrics
import goodmetrics.io.opentelemetry.proto.metrics.v1.ScopeMetrics
import goodmetrics.io.opentelemetry.proto.metrics.v1.gauge
import goodmetrics.io.opentelemetry.proto.metrics.v1.histogram
import goodmetrics.io.opentelemetry.proto.metrics.v1.histogramDataPoint
import goodmetrics.io.opentelemetry.proto.metrics.v1.metric
import goodmetrics.io.opentelemetry.proto.metrics.v1.numberDataPoint
import goodmetrics.io.opentelemetry.proto.metrics.v1.resourceMetrics
import goodmetrics.io.opentelemetry.proto.metrics.v1.scopeMetrics
import goodmetrics.io.opentelemetry.proto.metrics.v1.sum
import goodmetrics.io.opentelemetry.proto.resource.v1.resource
import goodmetrics.pipeline.AggregatedBatch
import goodmetrics.pipeline.Aggregation
import goodmetrics.pipeline.bucket
import goodmetrics.pipeline.bucketBelow
import io.grpc.CallOptions
import io.grpc.ClientInterceptor
import io.grpc.Deadline
import io.grpc.ManagedChannel
import io.grpc.netty.GrpcSslContexts
import io.grpc.netty.NettyChannelBuilder
import io.netty.handler.ssl.util.InsecureTrustManagerFactory
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.LongAdder
import kotlin.time.Duration
import kotlin.time.Duration.Companion.nanoseconds
import kotlin.time.Duration.Companion.seconds
/**
 * Where to attach the dimensions that are known ahead of time (host, region, service, ...)
 * when emitting OTLP.
 */
sealed interface PrescientDimensions {
    /**
     * Include resource dimensions on the OTLP resource.
     */
    data class AsResource(val resourceDimensions: Map<String, Metrics.Dimension>) : PrescientDimensions

    /**
     * Include resource dimensions on each metric instead of on the Resource. You'd use this for
     * downstreams that either do not support or do something undesirable with Resource dimensions.
     */
    data class AsDimensions(val sharedDimensions: Map<String, Metrics.Dimension>) : PrescientDimensions
}
/**
 * Transport security for the channel to the OTLP collector.
 * See how each mode configures the channel in OpentelemetryClient.connect().
 */
enum class SecurityMode {
    // No TLS at all: channelBuilder.usePlaintext().
    Plaintext,
    // TLS on the wire, but trusts any server certificate (InsecureTrustManagerFactory).
    Insecure,
    // Standard TLS with normal certificate verification.
    Tls,
}
/**
 * This client should be used as a last resort, in defeat, if you
 * cannot use the goodmetrics protocol. Opentelemetry is highly
 * lossy and inflexible. I'm doing my best here, but you're not
 * getting the full goodmetrics experience if you're still
 * addicted to opentelemetry line protocol.
 */
class OpentelemetryClient(
    private val channel: ManagedChannel,
    private val prescientDimensions: PrescientDimensions,
    private val timeout: Duration,
) {
    companion object {
        /**
         * Builds a client over a freshly constructed Netty channel.
         *
         * @param sillyOtlpHostname collector host
         * @param port collector port
         * @param prescientDimensions where ahead-of-time dimensions are attached (resource vs. per-metric)
         * @param securityMode plaintext, insecure TLS, or verified TLS
         * @param interceptors stuff like MetadataUtils.newAttachHeadersInterceptor()
         * @param timeout per-export gRPC deadline
         */
        fun connect(
            sillyOtlpHostname: String = "localhost",
            port: Int = 5001,
            prescientDimensions: PrescientDimensions,
            securityMode: SecurityMode,
            interceptors: List<ClientInterceptor>,
            timeout: Duration = 5.seconds,
        ): OpentelemetryClient {
            val channelBuilder = NettyChannelBuilder.forAddress(sillyOtlpHostname, port)
            when (securityMode) {
                SecurityMode.Tls -> {
                    channelBuilder.useTransportSecurity()
                }
                SecurityMode.Insecure -> {
                    // TLS on the wire, but accept whatever certificate the server presents.
                    channelBuilder.useTransportSecurity()
                    channelBuilder.sslContext(
                        GrpcSslContexts.forClient()
                            .trustManager(InsecureTrustManagerFactory.INSTANCE)
                            .build()
                    )
                }
                SecurityMode.Plaintext -> {
                    channelBuilder.usePlaintext()
                }
            }
            channelBuilder.intercept(interceptors)
            return OpentelemetryClient(channelBuilder.build(), prescientDimensions, timeout)
        }
    }

    // A fresh stub per export so every call gets its own absolute deadline
    // (Deadline.after is relative to now, so it must not be cached).
    private fun stub(): MetricsServiceGrpcKt.MetricsServiceCoroutineStub = MetricsServiceGrpcKt.MetricsServiceCoroutineStub(
        channel,
        CallOptions.DEFAULT
            .withDeadline(Deadline.after(timeout.inWholeMilliseconds, TimeUnit.MILLISECONDS))
    )

    /**
     * Exports a batch of raw, unaggregated Metrics as OTLP gauges + per-value histograms.
     */
    suspend fun sendMetricsBatch(batch: List<Metrics>) {
        val resourceMetricsBatch = asResourceMetrics(batch)
        stub().export(
            exportMetricsServiceRequest {
                resourceMetrics.add(resourceMetricsBatch)
            }
        )
    }

    /**
     * Exports pre-aggregated batches (histograms and statistic sets) as delta-temporality OTLP.
     */
    suspend fun sendPreaggregatedBatch(batch: List<AggregatedBatch>) {
        val resourceMetricsBatch = asResourceMetricsFromBatch(batch)
        stub().export(
            exportMetricsServiceRequest {
                resourceMetrics.add(resourceMetricsBatch)
            }
        )
    }

    private fun asResourceMetricsFromBatch(batch: List<AggregatedBatch>): ResourceMetrics {
        return resourceMetrics {
            // prescientResource is null when dimensions ride on each metric instead.
            prescientResource?.let { this.resource = it }
            for (aggregate in batch) {
                this.scopeMetrics.add(aggregate.asOtlpScopeMetrics())
            }
        }
    }

    private fun AggregatedBatch.asOtlpScopeMetrics(): ScopeMetrics = scopeMetrics {
        scope = library
        metrics.addAll(this@asOtlpScopeMetrics.asGoofyOtlpMetricSequence().asIterable())
    }

    private fun asResourceMetrics(batch: List<Metrics>): ResourceMetrics = resourceMetrics {
        prescientResource?.let { this.resource = it }
        // Add the batch's scope metrics exactly once. The previous version added
        // asScopeMetrics(batch) inside a for-loop over the batch, duplicating every
        // metric |batch| times on the wire.
        this.scopeMetrics.add(asScopeMetrics(batch))
    }

    private fun asScopeMetrics(batch: List<Metrics>): ScopeMetrics = scopeMetrics {
        scope = library
        metrics.addAll(batch.asSequence().flatMap { it.asGoofyOtlpMetricSequence() }.asIterable())
    }

    // Flattens one aggregated batch into OTLP metrics: one histogram metric per
    // histogram aggregation, and min/max/sum/count sums per statistic set.
    private fun AggregatedBatch.asGoofyOtlpMetricSequence(): Sequence<Metric> = sequence {
        for ((position, measurements) in this@asGoofyOtlpMetricSequence.positions) {
            val otlpDimensions = position.map { it.asOtlpKeyValue() }
            for ((measurementName, aggregation) in measurements) {
                when (aggregation) {
                    is Aggregation.Histogram -> {
                        yield(
                            metric {
                                name = "${this@asGoofyOtlpMetricSequence.metric}_$measurementName"
                                unit = "1"
                                histogram = aggregation.asOtlpHistogram(otlpDimensions, this@asGoofyOtlpMetricSequence.timestampNanos, aggregationWidth)
                            }
                        )
                    }
                    is Aggregation.StatisticSet -> {
                        yieldAll(aggregation.statisticSetToOtlp(this@asGoofyOtlpMetricSequence.metric, measurementName, timestampNanos, aggregationWidth, otlpDimensions))
                    }
                }
            }
        }
    }

    // One OTLP "sum" metric per statistic-set component, named {metric}_{measurement}_{component}.
    private fun Aggregation.StatisticSet.statisticSetToOtlp(
        metric: String,
        measurementName: String,
        timestampNanos: Long,
        aggregationWidth: Duration,
        dimensions: Iterable<KeyValue>,
    ): Sequence<Metric> = sequence {
        yield(statisticSetDataPoint(metric, measurementName, "min", min, timestampNanos, aggregationWidth, dimensions))
        yield(statisticSetDataPoint(metric, measurementName, "max", max, timestampNanos, aggregationWidth, dimensions))
        yield(statisticSetDataPoint(metric, measurementName, "sum", sum, timestampNanos, aggregationWidth, dimensions))
        yield(statisticSetDataPoint(metric, measurementName, "count", count, timestampNanos, aggregationWidth, dimensions))
    }

    private fun statisticSetDataPoint(
        metricName: String,
        measurementName: String,
        statisticSetComponent: String,
        value: Number,
        timestampNanos: Long,
        aggregationWidth: Duration,
        dimensions: Iterable<KeyValue>,
    ): Metric = metric {
        name = "${metricName}_${measurementName}_$statisticSetComponent"
        unit = "1"
        sum = sum {
            isMonotonic = false
            // because cumulative is bullshit
            aggregationTemporality = AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA
            dataPoints.add(newNumberDataPoint(value, timestampNanos, aggregationWidth, dimensions))
        }
    }

    // Raw Metrics -> gauges (plain measurements) + single-observation histograms (distributions).
    private fun Metrics.asGoofyOtlpMetricSequence(): Sequence<Metric> {
        val otlpDimensions = metricDimensions.values.map { it.asOtlpKeyValue() }
        return sequence {
            for ((measurementName, value) in this@asGoofyOtlpMetricSequence.metricMeasurements) {
                yield(
                    metric {
                        // name: format!("{metric_name}_{measurement_name}", metric_name = datum.metric, measurement_name=name),
                        name = "${this@asGoofyOtlpMetricSequence.name}_$measurementName"
                        unit = "1"
                        gauge = gauge {
                            this.dataPoints.add(newNumberDataPoint(value, timestampNanos, (System.nanoTime() - startNanoTime).nanoseconds, otlpDimensions.asIterable()))
                        }
                    }
                )
            }
            for ((measurementName, value) in this@asGoofyOtlpMetricSequence.metricDistributions) {
                yield(
                    metric {
                        name = "${this@asGoofyOtlpMetricSequence.name}_$measurementName"
                        unit = "1"
                        histogram = asOtlpHistogram(otlpDimensions, value)
                    }
                )
            }
        }
    }

    private fun newNumberDataPoint(value: Number, timestampNanos: Long, aggregationWidth: Duration, dimensions: Iterable<KeyValue>) = numberDataPoint {
        this.timeUnixNano = timestampNanos
        this.startTimeUnixNano = timestampNanos - aggregationWidth.inWholeNanoseconds
        attributes.addAll(dimensions)
        // Integral measurements (Long or a LongAdder counter) keep integer fidelity.
        if (value is Long || value is LongAdder) {
            asInt = value.toLong()
        } else {
            asDouble = value.toDouble()
        }
    }

    // A single observation rendered as a sparse delta histogram.
    private fun Metrics.asOtlpHistogram(
        otlpDimensions: Iterable<KeyValue>,
        value: Long,
    ) = histogram {
        // Because cumulative is bullshit for service metrics. Change my mind.
        aggregationTemporality = AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA
        dataPoints.add(
            histogramDataPoint {
                attributes.addAll(otlpDimensions)
                startTimeUnixNano = timestampNanos - (System.nanoTime() - startNanoTime) // approximate, whatever.
                timeUnixNano = timestampNanos
                count = 1
                val bucketValue = bucket(value)
                if (0 < bucketValue) {
                    // This little humdinger is here so Lightstep can interpret the boundary for the _real_ measurement
                    // below. It's similar to the 0 that opentelemetry demands, but different in that it is actually a
                    // reasonable ask.
                    // Lightstep has an internal representation of histograms & while I don't pretend to understand
                    // how they've implemented them, they told me that they interpret the absence of a lower bounding
                    // bucket as an infinite lower bound. That's not consistent with my read of otlp BUT it makes
                    // infinitely more sense than imposing an upper infinity bucket upon your protocol.
                    // Prometheus is a cataclysm from which there is no redemption: It ruins developers' minds with
                    // its broken and much lauded blunders; it shames my profession by its protocol as well as those
                    // spawned through its vile influence and disappoints the thoughtful by its existence.
                    // But, you know, this particular thing for Lightstep seems fine because there's technical merit.
                    explicitBounds.add(bucketBelow(value).toDouble())
                    bucketCounts.add(0)
                }
                explicitBounds.add(bucketValue.toDouble())
                bucketCounts.add(1)
                bucketCounts.add(0) // otlp go die in a fire
            }
        )
    }

    private fun Aggregation.Histogram.asOtlpHistogram(
        otlpDimensions: Iterable<KeyValue>,
        timestampNanos: Long,
        aggregationWidth: Duration,
    ) = histogram {
        // Because cumulative is bullshit for service metrics. Change my mind.
        aggregationTemporality = AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA
        dataPoints.add(
            histogramDataPoint {
                attributes.addAll(otlpDimensions)
                startTimeUnixNano = timestampNanos - aggregationWidth.inWholeNanoseconds
                timeUnixNano = timestampNanos
                // NOTE(review): qualified-this reconstructs the aggregation's bucketCounts map
                // (bucket -> LongAdder); `bucketCounts` unqualified is the OTLP builder's field.
                val sorted = this@asOtlpHistogram.bucketCounts.toSortedMap()
                if (sorted.isNotEmpty() && 0 < sorted.firstKey()) {
                    // Again, a reasonable request from Lightstep to work around their lower-infinity interpretation
                    // of histogram data. Goodmetrics sends sparse histograms and if lightstep has further issues
                    // parsing these I might have to expand this to fill in 0's for "missing" buckets or delimit
                    // empty ranges with 0'd out buckets. They haven't asked for that yet though so I'm not doing it.
                    explicitBounds.add(bucketBelow(sorted.firstKey()).toDouble())
                    bucketCounts.add(0)
                }
                count = this@asOtlpHistogram.bucketCounts.values.sumOf { it.sum() }
                for ((bucket, count) in sorted) {
                    explicitBounds.add(bucket.toDouble())
                    bucketCounts.add(count.sum())
                }
                bucketCounts.add(0) // because OTLP is _stupid_ and defined histogram format to have an implicit infinity bucket.
            }
        )
    }

    // Instrumentation scope reported with every export.
    private val library = instrumentationScope {
        name = "goodmetrics_kotlin"
        version = OpentelemetryClient::class.java.`package`.implementationVersion ?: "development"
    }

    // Null when prescient dimensions ride on each metric (AsDimensions) rather than the Resource.
    private val prescientResource by lazy {
        when (prescientDimensions) {
            is PrescientDimensions.AsDimensions -> {
                null
            }
            is PrescientDimensions.AsResource -> {
                resource {
                    attributes.addAll(prescientDimensions.resourceDimensions.asOtlpDimensions().asIterable())
                }
            }
        }
    }

    private fun Map<String, Metrics.Dimension>.asOtlpDimensions(): Sequence<KeyValue> = sequence {
        for (dimension in this@asOtlpDimensions) {
            yield(dimension.value.asOtlpKeyValue())
        }
    }

    private fun Metrics.Dimension.asOtlpKeyValue(): KeyValue = keyValue {
        key = this@asOtlpKeyValue.name
        when (val v = this@asOtlpKeyValue) {
            is Metrics.Dimension.Boolean -> {
                value = anyValue { boolValue = v.value }
            }
            is Metrics.Dimension.Number -> {
                value = anyValue { intValue = v.value }
            }
            is Metrics.Dimension.String -> {
                value = anyValue { stringValue = v.value }
            }
        }
    }
}