# All Downloads are FREE. Search and download functionalities are using the official Maven repository.
#
# e.kafka-client.0.34.0.source-code.otel-jmx.config.yaml Maven / Gradle / Ivy
#
# The newest version!
---
rules:

#### Responsive Metrics ####

 # Responsive application metrics
 - bean: dev.responsive:type=application-metrics,responsive-version=*,responsive-commit-id=*,streams-version=*,streams-commit-id=*,consumer-group=*,streams-application-id=*,streams-client-id=*
   # Copy each wildcarded ObjectName property onto the emitted metrics as an attribute.
   metricAttribute:
     responsiveVersion: param(responsive-version)
     responsiveCommitId: param(responsive-commit-id)
     streamsVersion: param(streams-version)
     streamsCommitId: param(streams-commit-id)
     consumerGroup: param(consumer-group)
     streamsApplicationId: param(streams-application-id)
     streamsClientId: param(streams-client-id)
   # JMX attribute name -> exported OTel metric definition.
   mapping:
     streams-state:
       metric: responsive.kafka.streams.state
       type: gauge
       desc: the current state of this Kafka Streams client
       unit: '{KafkaStreams.State}'
     num-restoring-changelogs:
       metric: responsive.kafka.streams.num.restoring.changelogs
       type: gauge
       desc: the number of state stores currently being restored from a changelog
       unit: '{stores}'
     num-interrupted-changelogs:
       metric: responsive.kafka.streams.num.interrupted.changelogs
       # NOTE(review): no explicit "type" here, unlike the sibling entries — this
       # falls back to the gatherer's default metric type; confirm gauge was intended.
       desc: the number of state stores that were interrupted before finishing restoration
       unit: '{stores}'

 # Responsive topic metrics
 - bean: dev.responsive:type=topic-metrics,responsive-version=*,responsive-commit-id=*,streams-version=*,streams-commit-id=*,consumer-group=*,streams-application-id=*,streams-client-id=*,thread-id=*,topic=*,partition=*
   metricAttribute:
     responsiveVersion: param(responsive-version)
     responsiveCommitId: param(responsive-commit-id)
     streamsVersion: param(streams-version)
     streamsCommitId: param(streams-commit-id)
     consumerGroup: param(consumer-group)
     streamsApplicationId: param(streams-application-id)
     streamsClientId: param(streams-client-id)
     thread: param(thread-id)
     partition: param(partition)
     topic: param(topic)
   # Per topic-partition source offsets, tagged with thread/topic/partition above.
   mapping:
     end-offset:
       metric: responsive.kafka.streams.source.offset.end
       type: gauge
       desc: the current end offset of the partition
       unit: '{offset}'
     committed-offset:
       metric: responsive.kafka.streams.source.offset.committed
       type: gauge
       desc: the current committed offset of the partition
       unit: '{offset}'

 # Responsive store metrics
 - bean: dev.responsive:type=store-metrics,responsive-version=*,responsive-commit-id=*,streams-version=*,streams-commit-id=*,consumer-group=*,streams-application-id=*,streams-client-id=*,thread-id=*,topic=*,partition=*,store=*
   metricAttribute:
     responsiveVersion: param(responsive-version)
     responsiveCommitId: param(responsive-commit-id)
     streamsVersion: param(streams-version)
     streamsCommitId: param(streams-commit-id)
     consumerGroup: param(consumer-group)
     streamsApplicationId: param(streams-application-id)
     streamsClientId: param(streams-client-id)
     thread: param(thread-id)
     partition: param(partition)
     topic: param(topic)
     store: param(store)
   # Per-store restore timing and commit-buffer flush/truncation metrics.
   mapping:
     time-restoring:
       metric: responsive.kafka.streams.time.restoring
       type: gauge
       desc: the amount of time since this state store started restoration
       unit: '{milliseconds}'
     time-since-last-flush:
       metric: responsive.kafka.streams.time.since.last.flush
       type: gauge
       desc: the amount of time since the last successful flush
       unit: '{milliseconds}'
     # NOTE(review): every entry from here down omits "type" (the two entries above
     # declare gauge) — these rely on the gatherer's default metric type; confirm
     # that is intended, particularly for the cumulative "-total" values.
     flush-rate:
       metric: responsive.kafka.streams.flush.rate
       desc: the rate of commit buffer flushes
       unit: '{flushes/s}'
     flush-total:
       metric: responsive.kafka.streams.flush.total
       desc: the total number of commit buffer flushes
       unit: '{flushes}'
     flush-latency-avg:
       metric: responsive.kafka.streams.flush.latency.avg
       desc: the average time it took to flush the commit buffer
       unit: '{milliseconds}'
     flush-latency-max:
       metric: responsive.kafka.streams.flush.latency.max
       desc: the maximum time it took to flush the commit buffer
       unit: '{milliseconds}'
     flush-errors-rate:
       metric: responsive.kafka.streams.flush.errors.rate
       desc: the rate of commit buffer flushes that failed
       unit: '{flushes/s}'
     flush-errors-total:
       metric: responsive.kafka.streams.flush.errors.total
       desc: the total number of commit buffer flushes that failed
       unit: '{flushes}'
     failed-truncations-rate:
       metric: responsive.kafka.streams.failed.truncations.rate
       desc: the rate of changelog truncation attempts that failed
       unit: '{deletes/s}'
     failed-truncations-total:
       metric: responsive.kafka.streams.failed.truncations.total
       desc: the total number of changelog truncation attempts that failed
       unit: '{deletes}'


 #### Apache Kafka Metrics ####

 # Consumer Client metrics
 # Per topic-partition consumer lag.
 - bean: kafka.consumer:type=consumer-fetch-manager-metrics,partition=*,topic=*,client-id=*
   metricAttribute:
     partition: param(partition)
     topic: param(topic)
     clientId: param(client-id)
   mapping:
     records-lag:
       metric: kafka.streams.records.lag
       type: gauge
       desc: the current lag of the partition
       unit: '{records}'
 # Client-level variant of the fetch-manager bean (no topic/partition keys):
 # the maximum lag across all partitions assigned to this client.
 - bean: kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*
   metricAttribute:
     clientId: param(client-id)
   mapping:
     records-lag-max:
       metric: kafka.streams.records.lag.max
       type: gauge
       desc: the max lag of all partitions
       unit: '{records}'
 # Consumer group coordination / rebalance metrics.
 - bean: kafka.consumer:type=consumer-coordinator-metrics,client-id=*
   metricAttribute:
     clientId: param(client-id)
   mapping:
     assigned-partitions:
       metric: kafka.streams.assigned.partitions
       type: gauge
       desc: the number of assigned partitions
       unit: '{partitions}'
     last-rebalance-seconds-ago:
       metric: kafka.streams.rebalance.seconds.ago
       type: gauge
       desc: the time since last rebalance
       unit: '{seconds}'
     rebalance-rate-per-hour:
       metric: kafka.streams.rebalance.rate
       type: gauge
       desc: the rate of rebalances
       unit: '{rebalances-per-hour}'
     failed-rebalance-rate-per-hour:
       metric: kafka.streams.failed.rebalance.rate
       type: gauge
       desc: the rate of failed rebalances
       unit: '{rebalances-per-hour}'

 # Consumer client I/O timing metrics (cumulative nanoseconds).
 - bean: kafka.consumer:type=consumer-metrics,client-id=*
   metricAttribute:
     # Fixed: attribute key was "clientid"; renamed to "clientId" to match the
     # casing used by every other rule in this file.
     clientId: param(client-id)
   mapping:
     io-wait-time-ns-total:
       metric: kafka.consumer.io.wait.time.ns.total
       type: gauge
       desc: the total time waiting for new records from kafka
       unit: '{nanoseconds}'
     io-time-ns-total:
       metric: kafka.consumer.io.time.ns.total
       type: gauge
       desc: the total time waiting for reads from kafka
       unit: '{nanoseconds}'

 # Producer client timing metrics (cumulative nanoseconds spent in producer
 # operations: transactions, buffer-pool waits, flush, and metadata waits).
 - bean: kafka.producer:type=producer-metrics,client-id=*
   metricAttribute:
     # Fixed: attribute key was "clientid"; renamed to "clientId" to match the
     # casing used by every other rule in this file.
     clientId: param(client-id)
   mapping:
     txn-init-time-ns-total:
       metric: kafka.producer.txn.init.time.ns.total
       type: gauge
       desc: the total time spent initializing transactions for the producer
       unit: '{nanoseconds}'
     txn-abort-time-ns-total:
       metric: kafka.producer.txn.abort.time.ns.total
       type: gauge
       desc: the total time spent aborting transactions in the producer
       unit: '{nanoseconds}'
     txn-commit-time-ns-total:
       metric: kafka.producer.txn.commit.time.ns.total
       type: gauge
       desc: the total time spent committing in the producer
       unit: '{nanoseconds}'
     bufferpool-wait-time-ns-total:
       metric: kafka.producer.bufferpool.wait.time.ns.total
       type: gauge
       desc: the total time spent waiting on writes to kafka
       unit: '{nanoseconds}'
     flush-time-ns-total:
       metric: kafka.producer.flush.time.ns.total
       type: gauge
       desc: the total time spent waiting on producer flush
       unit: '{nanoseconds}'
     txn-send-offsets-time-ns-total:
       metric: kafka.producer.txn.send.offsets.time.ns.total
       type: gauge
       desc: the total time spent sending offsets in producer
       unit: '{nanoseconds}'
     # Indentation normalized: this entry was indented one level deeper than its
     # siblings (harmless to the parser, but inconsistent).
     metadata-wait-time-ns-total:
       metric: kafka.producer.metadata.wait.time.ns.total
       type: gauge
       desc: the total time spent waiting on producer metadata
       unit: '{nanoseconds}'

 # Kafka Streams client metrics
 # Client-level Kafka Streams health: thread failures since client start.
 - bean: kafka.streams:type=stream-metrics,client-id=*
   metricAttribute:
     clientId: param(client-id)
   mapping:
     failed-stream-threads:
       metric: kafka.streams.failed.stream.threads
       type: gauge
       desc: The number of failed stream threads since this client started
       unit: '{threads}'

 # Kafka Streams topic metrics
 - bean: kafka.streams:type=stream-topic-metrics,thread-id=*,task-id=*,processor-node-id=*,topic=*
   metricAttribute:
     thread: param(thread-id)
     task: param(task-id)
     processor: param(processor-node-id)
     topic: param(topic)
   # NOTE(review): these cumulative "-total" values are exported as type gauge
   # rather than a counter type — confirm that is intentional.
   mapping:
     records-consumed-total:
       metric: kafka.streams.topic.records.consumed.total
       type: gauge
       desc: the total records consumed
       unit: '{records}'
     bytes-consumed-total:
       metric: kafka.streams.topic.bytes.consumed.total
       type: gauge
       desc: the total bytes consumed
       unit: '{bytes}'
     records-produced-total:
       metric: kafka.streams.topic.records.produced.total
       type: gauge
       desc: the total records produced
       unit: '{records}'
     bytes-produced-total:
       metric: kafka.streams.topic.bytes.produced.total
       type: gauge
       desc: the total bytes produced
       unit: '{bytes}'

 # Kafka Streams thread metrics
 - bean: kafka.streams:type=stream-thread-metrics,thread-id=*
   metricAttribute:
     thread: param(thread-id)
   # Per stream-thread process/commit/poll/punctuate metrics. The "*-ratio"
   # entries carry no unit — per their desc they are dimensionless fractions.
   mapping:
     process-total:
       metric: kafka.streams.thread.process.total
       type: gauge
       desc: total records processed
       unit: '{records}'
     process-rate:
       metric: kafka.streams.thread.process.rate
       type: gauge
       desc: rate of records processed
       unit: '{records-per-second}'
     process-ratio:
       metric: kafka.streams.thread.process.ratio
       type: gauge
       desc: fraction of time spent in process
     commit-rate:
       metric: kafka.streams.thread.commit.rate
       type: gauge
       desc: rate of commit
       unit: '{ops-per-second}'
     commit-ratio:
       metric: kafka.streams.thread.commit.ratio
       type: gauge
       desc: fraction of time spent in commit
     commit-latency-avg:
       metric: kafka.streams.thread.commit.latency.avg
       type: gauge
       desc: avg latency of a commit
       unit: '{milliseconds}'
     commit-latency-max:
       metric: kafka.streams.thread.commit.latency.max
       type: gauge
       desc: max latency of a commit
       unit: '{milliseconds}'
     poll-rate:
       metric: kafka.streams.thread.poll.rate
       type: gauge
       desc: rate of poll calls
       unit: '{ops-per-second}'
     poll-ratio:
       metric: kafka.streams.thread.poll.ratio
       type: gauge
       desc: fraction of time spent in poll
     poll-records-avg:
       metric: kafka.streams.thread.poll.records.avg
       type: gauge
       desc: avg records per poll
       unit: '{records}'
     punctuate-ratio:
       metric: kafka.streams.thread.punctuate.ratio
       type: gauge
       desc: fraction of time spent in punctuate
     task-closed-rate:
       metric: kafka.streams.thread.task.closed.rate
       type: gauge
       desc: rate of tasks closed
       unit: '{tasks-per-second}'
     blocked-time-ns-total:
       # NOTE(review): name ends ".total.ns" while the consumer/producer rules use
       # the ".ns.total" order — renaming would break existing dashboards, so left
       # as-is; confirm which convention is canonical.
       metric: kafka.streams.thread.blocked.time.total.ns
       type: gauge
       desc: total time the stream thread was blocked
       unit: '{nanoseconds}'
     thread-start-time:
       metric: kafka.streams.thread.start.time
       type: gauge
       desc: the time the kafka streams thread was started
       unit: '{milliseconds}'

 # Kafka Streams task metrics
 ## THESE ARE GENERALLY **DEBUG** METRICS AND MIGHT NOT BE ENABLED
 ## Most Streams metrics below the client and thread level are debug or even trace
 # Per-task metrics; only present when the corresponding Streams metric
 # recording level (INFO/DEBUG, noted per entry below) is enabled.
 - bean: kafka.streams:type=stream-task-metrics,thread-id=*,task-id=*
   metricAttribute:
     thread: param(thread-id)
     task: param(task-id)
   mapping:
     ## INFO
     active-process-ratio:
       metric: kafka.streams.task.process.ratio
       type: gauge
       desc: fraction of time spent processing this task out of all the active tasks
     ## DEBUG
     process-total:
       metric: kafka.streams.task.process.total
       type: gauge
       desc: total records processed
       unit: '{records}'

 # MongoDB Client Metrics
 # MongoDB command counts and cumulative latencies, attributed per command.
 # Indentation normalized to the style used by the rest of the file.
 - bean: dev.responsive:type=mongodb-client,command=*
   metricAttribute:
     command: param(command)
   mapping:
     commands-succeeded-count:
       metric: mongodb.client.commands.succeeded.count
       type: gauge
       desc: count of commands that succeeded
       unit: '{commands}'
     commands-succeeded-cumulative-latency:
       metric: mongodb.client.commands.succeeded.cumulative.latency
       type: gauge
       desc: cumulative commands succeeded latency
       unit: '{milliseconds}'
     commands-failed-count:
       metric: mongodb.client.commands.failed.count
       type: gauge
       desc: count of commands that failed
       unit: '{commands}'
     commands-failed-cumulative-latency:
       # Fixed copy-paste bug: metric name was
       # "mongodb.client.commands.succeeded.failed.latency", which matches neither
       # the JMX attribute nor the desc ("cumulative commands failed latency").
       metric: mongodb.client.commands.failed.cumulative.latency
       type: gauge
       desc: cumulative commands failed latency
       unit: '{milliseconds}'

 # Cassandra Client Metrics
 # Cassandra driver metrics from a single fixed bean (no wildcarded ObjectName
 # properties, hence no metricAttribute section). Indentation normalized to the
 # style used by the rest of the file; metric names, types, descriptions and
 # units are unchanged.
 - bean: dev.responsive:type=cassandra-client
   mapping:
     bytes-sent:
       metric: cassandra.driver.bytes.sent.count
       type: gauge
       desc: count of bytes sent
       unit: '{bytes}'
     bytes-received:
       metric: cassandra.driver.bytes.received.count
       type: gauge
       desc: count of bytes received
       unit: '{bytes}'
     cql-requests-count:
       metric: cassandra.driver.cql.request.count
       type: gauge
       desc: count of CQL requests
       unit: '{requests}'
     cql-requests-cumulative-latency:
       metric: cassandra.driver.cql.request.cumulative.latency
       type: gauge
       desc: cumulative CQL request latency in milliseconds
       unit: '{milliseconds}'
     cql-request-errors-count:
       # NOTE(review): unlike its sibling "-count" metrics this name has no
       # trailing ".count" — left unchanged to avoid breaking dashboards.
       metric: cassandra.driver.cql.request.errors
       type: gauge
       desc: count of all request errors
       unit: '{errors}'
     cql-request-timeouts-count:
       metric: cassandra.driver.cql.request.timeout.count
       type: gauge
       desc: count of CQL request timeouts
       unit: '{errors}'
     cql-request-ignores-count:
       metric: cassandra.driver.cql.request.ignores.count
       type: gauge
       desc: count of all request ignores
       unit: '{errors}'
     cql-request-retries-count:
       metric: cassandra.driver.cql.request.retries.count
       type: gauge
       desc: count of all request retries
       unit: '{errors}'
     throttling-cumulative-delay:
       metric: cassandra.driver.throttling.cumulative.delay
       type: gauge
       desc: cumulative throttling delay in milliseconds
       unit: '{milliseconds}'
     throttling-errors-count:
       metric: cassandra.driver.throttling.errors.count
       type: gauge
       desc: count of throttling errors
       unit: '{errors}'
     connection-errors-count:
       metric: cassandra.driver.connection.errors.count
       type: gauge
       desc: cumulative count of all connection errors
       unit: '{errors}'




# © 2015 - 2024 Weber Informatics LLC | Privacy Policy