// NOTE(review): removed third-party "pay to download" spam boilerplate that was
// prepended to this generated file; it was not part of the source and made the
// file uncompilable (prose before the package declaration).
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: opentelemetry/proto/metrics/v1/metrics.proto
package io.opentelemetry.proto.metrics.v1;
/**
*
* ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
* time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
* summary statistics for a population of values, it may optionally contain the
* distribution of those values across a set of buckets.
*
*
* Protobuf type {@code opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint}
*/
public final class ExponentialHistogramDataPoint extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint)
ExponentialHistogramDataPointOrBuilder {
private static final long serialVersionUID = 0L;
// Use ExponentialHistogramDataPoint.newBuilder() to construct.
// NOTE(review): restored the wildcard type argument (`Builder<?>`) that HTML
// extraction stripped; the raw `Builder>` token did not compile. This matches
// the standard protoc-generated constructor signature.
private ExponentialHistogramDataPoint(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
private ExponentialHistogramDataPoint() {
attributes_ = java.util.Collections.emptyList();
exemplars_ = java.util.Collections.emptyList();
}
@java.lang.Override
@SuppressWarnings({"unused"})
// Reflection hook used by the protobuf runtime; returns a fresh, empty message
// instance rather than reusing the shared default instance.
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ExponentialHistogramDataPoint();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor invoked by the message PARSER: consumes exactly one
// ExponentialHistogramDataPoint from `input`. Tag numbers follow
// opentelemetry/proto/metrics/v1/metrics.proto. Repeated message fields
// (attributes = 1, exemplars = 11) accumulate into mutable lists that are made
// unmodifiable in the finally block.
// NOTE(review): restored the element type arguments on the two ArrayList
// allocations (stripped to raw types by HTML extraction).
private ExponentialHistogramDataPoint(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  this();
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          // Tag 0 signals end of message / end of stream.
          done = true;
          break;
        case 10: {
          // repeated KeyValue attributes = 1
          if (!((mutable_bitField0_ & 0x00000001) != 0)) {
            attributes_ = new java.util.ArrayList<io.opentelemetry.proto.common.v1.KeyValue>();
            mutable_bitField0_ |= 0x00000001;
          }
          attributes_.add(
              input.readMessage(io.opentelemetry.proto.common.v1.KeyValue.parser(), extensionRegistry));
          break;
        }
        case 17: {
          // fixed64 start_time_unix_nano = 2
          startTimeUnixNano_ = input.readFixed64();
          break;
        }
        case 25: {
          // fixed64 time_unix_nano = 3
          timeUnixNano_ = input.readFixed64();
          break;
        }
        case 33: {
          // fixed64 count = 4
          count_ = input.readFixed64();
          break;
        }
        case 41: {
          // optional double sum = 5 — presence tracked in bitField0_.
          bitField0_ |= 0x00000001;
          sum_ = input.readDouble();
          break;
        }
        case 48: {
          // sint32 scale = 6 (zig-zag varint)
          scale_ = input.readSInt32();
          break;
        }
        case 57: {
          // fixed64 zero_count = 7
          zeroCount_ = input.readFixed64();
          break;
        }
        case 66: {
          // Buckets positive = 8 — merge into any previously parsed value.
          io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.Builder subBuilder = null;
          if (positive_ != null) {
            subBuilder = positive_.toBuilder();
          }
          positive_ = input.readMessage(io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.parser(), extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(positive_);
            positive_ = subBuilder.buildPartial();
          }
          break;
        }
        case 74: {
          // Buckets negative = 9 — same merge pattern as `positive`.
          io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.Builder subBuilder = null;
          if (negative_ != null) {
            subBuilder = negative_.toBuilder();
          }
          negative_ = input.readMessage(io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.parser(), extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(negative_);
            negative_ = subBuilder.buildPartial();
          }
          break;
        }
        case 80: {
          // uint32 flags = 10
          flags_ = input.readUInt32();
          break;
        }
        case 90: {
          // repeated Exemplar exemplars = 11
          if (!((mutable_bitField0_ & 0x00000004) != 0)) {
            exemplars_ = new java.util.ArrayList<io.opentelemetry.proto.metrics.v1.Exemplar>();
            mutable_bitField0_ |= 0x00000004;
          }
          exemplars_.add(
              input.readMessage(io.opentelemetry.proto.metrics.v1.Exemplar.parser(), extensionRegistry));
          break;
        }
        case 97: {
          // optional double min = 12
          bitField0_ |= 0x00000002;
          min_ = input.readDouble();
          break;
        }
        case 105: {
          // optional double max = 13
          bitField0_ |= 0x00000004;
          max_ = input.readDouble();
          break;
        }
        case 113: {
          // double zero_threshold = 14
          zeroThreshold_ = input.readDouble();
          break;
        }
        default: {
          // Unknown field: preserve it, or stop if it cannot be parsed.
          if (!parseUnknownField(
              input, unknownFields, extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (com.google.protobuf.UninitializedMessageException e) {
    throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e).setUnfinishedMessage(this);
  } finally {
    // Freeze repeated fields and unknown fields even on failure so the
    // partially built message attached to the exception is safe to expose.
    if (((mutable_bitField0_ & 0x00000001) != 0)) {
      attributes_ = java.util.Collections.unmodifiableList(attributes_);
    }
    if (((mutable_bitField0_ & 0x00000004) != 0)) {
      exemplars_ = java.util.Collections.unmodifiableList(exemplars_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_fieldAccessorTable
.ensureFieldAccessorsInitialized(
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.class, io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Builder.class);
}
public interface BucketsOrBuilder extends
    // @@protoc_insertion_point(interface_extends:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * Offset is the bucket index of the first entry in the bucket_counts array.
   *
   * Note: This uses a varint encoding as a simple form of compression.
   * </pre>
   *
   * <code>sint32 offset = 1;</code>
   * @return The offset.
   */
  int getOffset();
  /**
   * <pre>
   * bucket_counts is an array of count values, where bucket_counts[i] carries
   * the count of the bucket at index (offset+i). bucket_counts[i] is the count
   * of values greater than base^(offset+i) and less than or equal to
   * base^(offset+i+1).
   * Note: By contrast, the explicit HistogramDataPoint uses
   * fixed64. This field is expected to have many buckets,
   * especially zeros, so uint64 has been selected to ensure
   * varint encoding.
   * </pre>
   *
   * <code>repeated uint64 bucket_counts = 2;</code>
   * @return A list containing the bucketCounts.
   */
  // NOTE(review): restored the <java.lang.Long> element type stripped by
  // extraction; a raw List here forces unchecked use at every call site.
  java.util.List<java.lang.Long> getBucketCountsList();
  /**
   * <pre>
   * bucket_counts is an array of count values, where bucket_counts[i] carries
   * the count of the bucket at index (offset+i).
   * </pre>
   *
   * <code>repeated uint64 bucket_counts = 2;</code>
   * @return The count of bucketCounts.
   */
  int getBucketCountsCount();
  /**
   * <pre>
   * bucket_counts is an array of count values, where bucket_counts[i] carries
   * the count of the bucket at index (offset+i).
   * </pre>
   *
   * <code>repeated uint64 bucket_counts = 2;</code>
   * @param index The index of the element to return.
   * @return The bucketCounts at the given index.
   */
  long getBucketCounts(int index);
}
/**
*
* Buckets are a set of bucket counts, encoded in a contiguous array
* of counts.
*
*
* Protobuf type {@code opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets}
*/
public static final class Buckets extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)
BucketsOrBuilder {
private static final long serialVersionUID = 0L;
// Use Buckets.newBuilder() to construct.
// NOTE(review): restored the wildcard type argument (`Builder<?>`) that was
// stripped from the generated source; the raw `Builder>` did not compile.
private Buckets(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
private Buckets() {
bucketCounts_ = emptyLongList();
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new Buckets();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor invoked by PARSER: consumes one Buckets message from
// `input`. Handles both encodings of `repeated uint64 bucket_counts = 2`:
// unpacked single elements (tag 16) and the proto3-default packed run (tag 18).
private Buckets(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 signals end of message / end of stream.
done = true;
break;
case 8: {
// sint32 offset = 1 (zig-zag varint).
offset_ = input.readSInt32();
break;
}
case 16: {
// Unpacked bucket_counts element; lazily switch to a mutable list.
if (!((mutable_bitField0_ & 0x00000001) != 0)) {
bucketCounts_ = newLongList();
mutable_bitField0_ |= 0x00000001;
}
bucketCounts_.addLong(input.readUInt64());
break;
}
case 18: {
// Packed bucket_counts: a length-delimited run of varints, read until
// the pushed limit is exhausted.
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000001) != 0) && input.getBytesUntilLimit() > 0) {
bucketCounts_ = newLongList();
mutable_bitField0_ |= 0x00000001;
}
while (input.getBytesUntilLimit() > 0) {
bucketCounts_.addLong(input.readUInt64());
}
input.popLimit(limit);
break;
}
default: {
// Unknown field: preserve it, or stop if it cannot be parsed.
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) != 0)) {
bucketCounts_.makeImmutable(); // freeze the list so the built message is immutable
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_fieldAccessorTable
.ensureFieldAccessorsInitialized(
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.class, io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.Builder.class);
}
public static final int OFFSET_FIELD_NUMBER = 1;
private int offset_;
/**
*
* Offset is the bucket index of the first entry in the bucket_counts array.
*
* Note: This uses a varint encoding as a simple form of compression.
*
*
* sint32 offset = 1;
* @return The offset.
*/
@java.lang.Override
public int getOffset() {
return offset_;
}
public static final int BUCKET_COUNTS_FIELD_NUMBER = 2;
// Backing storage; Internal.LongList avoids boxing and implements List<Long>.
private com.google.protobuf.Internal.LongList bucketCounts_;
/**
 * <pre>
 * bucket_counts is an array of count values, where bucket_counts[i] carries
 * the count of the bucket at index (offset+i). bucket_counts[i] is the count
 * of values greater than base^(offset+i) and less than or equal to
 * base^(offset+i+1).
 * Note: By contrast, the explicit HistogramDataPoint uses
 * fixed64. This field is expected to have many buckets,
 * especially zeros, so uint64 has been selected to ensure
 * varint encoding.
 * </pre>
 *
 * <code>repeated uint64 bucket_counts = 2;</code>
 * @return A list containing the bucketCounts.
 */
// NOTE(review): restored the <java.lang.Long> element type stripped by
// extraction; the raw return type did not satisfy BucketsOrBuilder cleanly.
@java.lang.Override
public java.util.List<java.lang.Long>
    getBucketCountsList() {
  return bucketCounts_;
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @return The count of bucketCounts.
*/
public int getBucketCountsCount() {
return bucketCounts_.size();
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @param index The index of the element to return.
* @return The bucketCounts at the given index.
*/
public long getBucketCounts(int index) {
return bucketCounts_.getLong(index);
}
private int bucketCountsMemoizedSerializedSize = -1;
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
// Serializes this message. bucket_counts is written packed (a single tag-18
// length-delimited run) using the payload size memoized by getSerializedSize(),
// which is invoked first precisely to populate that memo.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (offset_ != 0) {
output.writeSInt32(1, offset_);
}
if (getBucketCountsList().size() > 0) {
// Packed encoding: raw tag byte 18 (field 2, wire type 2), then payload size.
output.writeUInt32NoTag(18);
output.writeUInt32NoTag(bucketCountsMemoizedSerializedSize);
}
for (int i = 0; i < bucketCounts_.size(); i++) {
output.writeUInt64NoTag(bucketCounts_.getLong(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
// Computes (and memoizes) the serialized byte size. Also memoizes the packed
// payload size of bucket_counts, which writeTo() relies on.
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (offset_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeSInt32Size(1, offset_);
}
{
int dataSize = 0;
for (int i = 0; i < bucketCounts_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(bucketCounts_.getLong(i));
}
size += dataSize;
if (!getBucketCountsList().isEmpty()) {
// Packed field overhead: 1 tag byte plus the varint-encoded payload length.
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
bucketCountsMemoizedSerializedSize = dataSize;
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
/**
 * Two Buckets are equal when their offset, bucket_counts list, and unknown
 * fields all match. Non-Buckets arguments fall back to the superclass check.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)) {
    return super.equals(obj);
  }
  io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets that =
      (io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets) obj;
  return getOffset() == that.getOffset()
      && getBucketCountsList().equals(that.getBucketCountsList())
      && unknownFields.equals(that.unknownFields);
}
@java.lang.Override
// Memoized hash consistent with equals(): folds in the descriptor, offset,
// bucket_counts (only when non-empty), and unknown fields.
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + OFFSET_FIELD_NUMBER;
hash = (53 * hash) + getOffset();
if (getBucketCountsCount() > 0) {
hash = (37 * hash) + BUCKET_COUNTS_FIELD_NUMBER;
hash = (53 * hash) + getBucketCountsList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Buckets are a set of bucket counts, encoded in a contiguous array
* of counts.
*
*
* Protobuf type {@code opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.BucketsOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_fieldAccessorTable
.ensureFieldAccessorsInitialized(
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.class, io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.Builder.class);
}
// Construct using io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
offset_ = 0;
bucketCounts_ = emptyLongList();
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return io.opentelemetry.proto.metrics.v1.MetricsProto.internal_static_opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_descriptor;
}
@java.lang.Override
public io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets getDefaultInstanceForType() {
return io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.getDefaultInstance();
}
@java.lang.Override
public io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets build() {
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets buildPartial() {
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets result = new io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets(this);
int from_bitField0_ = bitField0_;
result.offset_ = offset_;
if (((bitField0_ & 0x00000001) != 0)) {
bucketCounts_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00000001);
}
result.bucketCounts_ = bucketCounts_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets) {
return mergeFrom((io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets other) {
if (other == io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets.getDefaultInstance()) return this;
if (other.getOffset() != 0) {
setOffset(other.getOffset());
}
if (!other.bucketCounts_.isEmpty()) {
if (bucketCounts_.isEmpty()) {
bucketCounts_ = other.bucketCounts_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureBucketCountsIsMutable();
bucketCounts_.addAll(other.bucketCounts_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int offset_ ;
/**
*
* Offset is the bucket index of the first entry in the bucket_counts array.
*
* Note: This uses a varint encoding as a simple form of compression.
*
*
* sint32 offset = 1;
* @return The offset.
*/
@java.lang.Override
public int getOffset() {
return offset_;
}
/**
*
* Offset is the bucket index of the first entry in the bucket_counts array.
*
* Note: This uses a varint encoding as a simple form of compression.
*
*
* sint32 offset = 1;
* @param value The offset to set.
* @return This builder for chaining.
*/
public Builder setOffset(int value) {
offset_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * bucket_counts is an array of count values, where bucket_counts[i] carries
 * the count of the bucket at index (offset+i). bucket_counts[i] is the count
 * of values greater than base^(offset+i) and less than or equal to
 * base^(offset+i+1).
 * Note: By contrast, the explicit HistogramDataPoint uses
 * fixed64. This field is expected to have many buckets,
 * especially zeros, so uint64 has been selected to ensure
 * varint encoding.
 * </pre>
 *
 * <code>repeated uint64 bucket_counts = 2;</code>
 * @return A list containing the bucketCounts.
 */
// NOTE(review): restored the <java.lang.Long> element type stripped by
// extraction, and removed the `offset` javadoc paragraph that extraction had
// merged into this method's doc. An unmodifiable view is returned while the
// builder still owns the mutable list (bit 0 set).
public java.util.List<java.lang.Long>
    getBucketCountsList() {
  return ((bitField0_ & 0x00000001) != 0) ?
      java.util.Collections.unmodifiableList(bucketCounts_) : bucketCounts_;
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @return The count of bucketCounts.
*/
public int getBucketCountsCount() {
return bucketCounts_.size();
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @param index The index of the element to return.
* @return The bucketCounts at the given index.
*/
public long getBucketCounts(int index) {
return bucketCounts_.getLong(index);
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @param index The index to set the value at.
* @param value The bucketCounts to set.
* @return This builder for chaining.
*/
public Builder setBucketCounts(
int index, long value) {
ensureBucketCountsIsMutable();
bucketCounts_.setLong(index, value);
onChanged();
return this;
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @param value The bucketCounts to add.
* @return This builder for chaining.
*/
public Builder addBucketCounts(long value) {
ensureBucketCountsIsMutable();
bucketCounts_.addLong(value);
onChanged();
return this;
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @param values The bucketCounts to add.
* @return This builder for chaining.
*/
public Builder addAllBucketCounts(
java.lang.Iterable extends java.lang.Long> values) {
ensureBucketCountsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, bucketCounts_);
onChanged();
return this;
}
/**
*
* bucket_counts is an array of count values, where bucket_counts[i] carries
* the count of the bucket at index (offset+i). bucket_counts[i] is the count
* of values greater than base^(offset+i) and less than or equal to
* base^(offset+i+1).
* Note: By contrast, the explicit HistogramDataPoint uses
* fixed64. This field is expected to have many buckets,
* especially zeros, so uint64 has been selected to ensure
* varint encoding.
*
*
* repeated uint64 bucket_counts = 2;
* @return This builder for chaining.
*/
public Builder clearBucketCounts() {
bucketCounts_ = emptyLongList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)
}
// @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets)
// Singleton default (all-fields-unset) instance of Buckets, created eagerly at
// class-initialization time.
private static final io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets();
}
/** Returns the shared immutable default instance of {@code Buckets}. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Restored the `<Buckets>` type arguments that were stripped from this
// generated code; the raw types did not match the @Override contracts.
private static final com.google.protobuf.Parser<Buckets>
PARSER = new com.google.protobuf.AbstractParser<Buckets>() {
@java.lang.Override
public Buckets parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
// Delegates to the parsing constructor; "partial" means required-field
// checks are deferred to the caller.
return new Buckets(input, extensionRegistry);
}
};
/** Returns the shared parser for {@code Buckets} messages. */
public static com.google.protobuf.Parser<Buckets> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<Buckets> getParserForType() {
return PARSER;
}
@java.lang.Override
public io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
// Bit set tracking presence of the explicit `optional` fields (sum/min/max).
private int bitField0_;
public static final int ATTRIBUTES_FIELD_NUMBER = 1;
// Restored the `<io.opentelemetry.proto.common.v1.KeyValue>` type argument
// that was stripped from this generated field declaration.
private java.util.List<io.opentelemetry.proto.common.v1.KeyValue> attributes_;
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
@java.lang.Override
public int getAttributesCount() {
return attributes_.size();
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
@java.lang.Override
public io.opentelemetry.proto.common.v1.KeyValueOrBuilder getAttributesOrBuilder(
int index) {
return attributes_.get(index);
}
public static final int START_TIME_UNIX_NANO_FIELD_NUMBER = 2;
private long startTimeUnixNano_;
/**
 * <pre>
 * StartTimeUnixNano is optional but strongly encouraged, see the
 * the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 start_time_unix_nano = 2;</code>
 * @return The startTimeUnixNano.
 */
@java.lang.Override
public long getStartTimeUnixNano() {
return startTimeUnixNano_;
}
public static final int TIME_UNIX_NANO_FIELD_NUMBER = 3;
private long timeUnixNano_;
/**
 * <pre>
 * TimeUnixNano is required, see the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 time_unix_nano = 3;</code>
 * @return The timeUnixNano.
 */
@java.lang.Override
public long getTimeUnixNano() {
return timeUnixNano_;
}
public static final int COUNT_FIELD_NUMBER = 4;
private long count_;
/**
 * <pre>
 * count is the number of values in the population. Must be
 * non-negative. This value must be equal to the sum of the "bucket_counts"
 * values in the positive and negative Buckets plus the "zero_count" field.
 * </pre>
 *
 * <code>fixed64 count = 4;</code>
 * @return The count.
 */
@java.lang.Override
public long getCount() {
return count_;
}
public static final int SUM_FIELD_NUMBER = 5;
private double sum_;
/**
 * <pre>
 * sum of the values in the population. If count is zero then this field
 * must be zero.
 * Note: Sum should only be filled out when measuring non-negative discrete
 * events, and is assumed to be monotonic over the values of these events.
 * Negative events *can* be recorded, but sum should not be filled out when
 * doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 * see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
 * </pre>
 *
 * <code>optional double sum = 5;</code>
 * @return Whether the sum field is set.
 */
@java.lang.Override
public boolean hasSum() {
// Bit 0x1 of bitField0_ tracks explicit presence of the optional `sum` field.
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * <pre>
 * sum of the values in the population. If count is zero then this field
 * must be zero.
 * Note: Sum should only be filled out when measuring non-negative discrete
 * events, and is assumed to be monotonic over the values of these events.
 * Negative events *can* be recorded, but sum should not be filled out when
 * doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 * see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
 * </pre>
 *
 * <code>optional double sum = 5;</code>
 * @return The sum.
 */
@java.lang.Override
public double getSum() {
return sum_;
}
public static final int SCALE_FIELD_NUMBER = 6;
private int scale_;
/**
 * <pre>
 * scale describes the resolution of the histogram. Boundaries are
 * located at powers of the base, where:
 * base = (2^(2^-scale))
 * The histogram bucket identified by `index`, a signed integer,
 * contains values that are greater than (base^index) and
 * less than or equal to (base^(index+1)).
 * The positive and negative ranges of the histogram are expressed
 * separately. Negative values are mapped by their absolute value
 * into the negative range using the same scale as the positive range.
 * scale is not restricted by the protocol, as the permissible
 * values depend on the range of the data.
 * </pre>
 *
 * <code>sint32 scale = 6;</code>
 * @return The scale.
 */
@java.lang.Override
public int getScale() {
return scale_;
}
public static final int ZERO_COUNT_FIELD_NUMBER = 7;
private long zeroCount_;
/**
 * <pre>
 * zero_count is the count of values that are either exactly zero or
 * within the region considered zero by the instrumentation at the
 * tolerated degree of precision. This bucket stores values that
 * cannot be expressed using the standard exponential formula as
 * well as values that have been rounded to zero.
 * Implementations MAY consider the zero bucket to have probability
 * mass equal to (zero_count / count).
 * </pre>
 *
 * <code>fixed64 zero_count = 7;</code>
 * @return The zeroCount.
 */
@java.lang.Override
public long getZeroCount() {
return zeroCount_;
}
public static final int POSITIVE_FIELD_NUMBER = 8;
private io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets positive_;
/**
 * <pre>
 * positive carries the positive range of exponential bucket counts.
 * </pre>
 *
 * <code>.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets positive = 8;</code>
 * @return Whether the positive field is set.
 */
@java.lang.Override
public boolean hasPositive() {
// Message-typed field: presence is tracked by null-ness, not a bit field.
return positive_ != null;
}
/**
 * <pre>
 * (Optional) List of exemplars collected from
 * measurements that were used to form the data point
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.metrics.v1.Exemplar exemplars = 11;</code>
 */
@java.lang.Override
public io.opentelemetry.proto.metrics.v1.ExemplarOrBuilder getExemplarsOrBuilder(
int index) {
return exemplars_.get(index);
}
public static final int MIN_FIELD_NUMBER = 12;
private double min_;
/**
 * <pre>
 * min is the minimum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double min = 12;</code>
 * @return Whether the min field is set.
 */
@java.lang.Override
public boolean hasMin() {
// Bit 0x2 of bitField0_ tracks explicit presence of the optional `min` field.
return ((bitField0_ & 0x00000002) != 0);
}
/**
 * <pre>
 * min is the minimum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double min = 12;</code>
 * @return The min.
 */
@java.lang.Override
public double getMin() {
return min_;
}
public static final int MAX_FIELD_NUMBER = 13;
private double max_;
/**
 * <pre>
 * max is the maximum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double max = 13;</code>
 * @return Whether the max field is set.
 */
@java.lang.Override
public boolean hasMax() {
// Bit 0x4 of bitField0_ tracks explicit presence of the optional `max` field.
return ((bitField0_ & 0x00000004) != 0);
}
/**
 * <pre>
 * max is the maximum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double max = 13;</code>
 * @return The max.
 */
@java.lang.Override
public double getMax() {
return max_;
}
public static final int ZERO_THRESHOLD_FIELD_NUMBER = 14;
private double zeroThreshold_;
/**
 * <pre>
 * ZeroThreshold may be optionally set to convey the width of the zero
 * region. Where the zero region is defined as the closed interval
 * [-ZeroThreshold, ZeroThreshold].
 * When ZeroThreshold is 0, zero count bucket stores values that cannot be
 * expressed using the standard exponential formula as well as values that
 * have been rounded to zero.
 * </pre>
 *
 * <code>double zero_threshold = 14;</code>
 * @return The zeroThreshold.
 */
@java.lang.Override
public double getZeroThreshold() {
return zeroThreshold_;
}
// Memoized initialization check: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * Always true: this proto3 message has no required fields, so any instance
 * is considered initialized. The result is cached after first call.
 */
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
/**
 * Serializes this message to {@code output} in field-number order.
 * Proto3 default-valued scalar fields are skipped; explicitly-present
 * optional fields (sum/min/max, tracked in bitField0_) are always written,
 * and zero_threshold is compared via raw double bits so that -0.0 and NaN
 * payloads are preserved on the wire.
 */
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < attributes_.size(); i++) {
output.writeMessage(1, attributes_.get(i));
}
if (startTimeUnixNano_ != 0L) {
output.writeFixed64(2, startTimeUnixNano_);
}
if (timeUnixNano_ != 0L) {
output.writeFixed64(3, timeUnixNano_);
}
if (count_ != 0L) {
output.writeFixed64(4, count_);
}
if (((bitField0_ & 0x00000001) != 0)) {
output.writeDouble(5, sum_);
}
if (scale_ != 0) {
output.writeSInt32(6, scale_);
}
if (zeroCount_ != 0L) {
output.writeFixed64(7, zeroCount_);
}
if (positive_ != null) {
output.writeMessage(8, getPositive());
}
if (negative_ != null) {
output.writeMessage(9, getNegative());
}
if (flags_ != 0) {
output.writeUInt32(10, flags_);
}
for (int i = 0; i < exemplars_.size(); i++) {
output.writeMessage(11, exemplars_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeDouble(12, min_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeDouble(13, max_);
}
if (java.lang.Double.doubleToRawLongBits(zeroThreshold_) != 0) {
output.writeDouble(14, zeroThreshold_);
}
// Round-trip any fields from newer schema versions we did not recognize.
unknownFields.writeTo(output);
}
/**
 * Computes (and memoizes) the exact serialized byte size of this message.
 * The field-skipping logic must mirror {@link #writeTo} exactly, or the
 * pre-sized output buffer would not match what is actually written.
 */
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < attributes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, attributes_.get(i));
}
if (startTimeUnixNano_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeFixed64Size(2, startTimeUnixNano_);
}
if (timeUnixNano_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeFixed64Size(3, timeUnixNano_);
}
if (count_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeFixed64Size(4, count_);
}
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(5, sum_);
}
if (scale_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeSInt32Size(6, scale_);
}
if (zeroCount_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeFixed64Size(7, zeroCount_);
}
if (positive_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, getPositive());
}
if (negative_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(9, getNegative());
}
if (flags_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(10, flags_);
}
for (int i = 0; i < exemplars_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(11, exemplars_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(12, min_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(13, max_);
}
if (java.lang.Double.doubleToRawLongBits(zeroThreshold_) != 0) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(14, zeroThreshold_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
/**
 * Field-by-field structural equality. Optional fields (sum/min/max and the
 * positive/negative sub-messages) compare presence first, then value;
 * doubles are compared via {@code doubleToLongBits} so NaN == NaN and
 * 0.0 != -0.0, matching protobuf equality semantics. Unknown fields must
 * also match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint)) {
return super.equals(obj);
}
io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint other = (io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint) obj;
if (!getAttributesList()
.equals(other.getAttributesList())) return false;
if (getStartTimeUnixNano()
!= other.getStartTimeUnixNano()) return false;
if (getTimeUnixNano()
!= other.getTimeUnixNano()) return false;
if (getCount()
!= other.getCount()) return false;
if (hasSum() != other.hasSum()) return false;
if (hasSum()) {
if (java.lang.Double.doubleToLongBits(getSum())
!= java.lang.Double.doubleToLongBits(
other.getSum())) return false;
}
if (getScale()
!= other.getScale()) return false;
if (getZeroCount()
!= other.getZeroCount()) return false;
if (hasPositive() != other.hasPositive()) return false;
if (hasPositive()) {
if (!getPositive()
.equals(other.getPositive())) return false;
}
if (hasNegative() != other.hasNegative()) return false;
if (hasNegative()) {
if (!getNegative()
.equals(other.getNegative())) return false;
}
if (getFlags()
!= other.getFlags()) return false;
if (!getExemplarsList()
.equals(other.getExemplarsList())) return false;
if (hasMin() != other.hasMin()) return false;
if (hasMin()) {
if (java.lang.Double.doubleToLongBits(getMin())
!= java.lang.Double.doubleToLongBits(
other.getMin())) return false;
}
if (hasMax() != other.hasMax()) return false;
if (hasMax()) {
if (java.lang.Double.doubleToLongBits(getMax())
!= java.lang.Double.doubleToLongBits(
other.getMax())) return false;
}
if (java.lang.Double.doubleToLongBits(getZeroThreshold())
!= java.lang.Double.doubleToLongBits(
other.getZeroThreshold())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
/**
 * Memoized hash consistent with {@link #equals}: each field contributes its
 * field number and value; optional/repeated fields contribute only when
 * present/non-empty, matching the presence checks in equals. Doubles are
 * hashed via {@code doubleToLongBits}.
 */
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getAttributesCount() > 0) {
hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER;
hash = (53 * hash) + getAttributesList().hashCode();
}
hash = (37 * hash) + START_TIME_UNIX_NANO_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getStartTimeUnixNano());
hash = (37 * hash) + TIME_UNIX_NANO_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getTimeUnixNano());
hash = (37 * hash) + COUNT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getCount());
if (hasSum()) {
hash = (37 * hash) + SUM_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getSum()));
}
hash = (37 * hash) + SCALE_FIELD_NUMBER;
hash = (53 * hash) + getScale();
hash = (37 * hash) + ZERO_COUNT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getZeroCount());
if (hasPositive()) {
hash = (37 * hash) + POSITIVE_FIELD_NUMBER;
hash = (53 * hash) + getPositive().hashCode();
}
if (hasNegative()) {
hash = (37 * hash) + NEGATIVE_FIELD_NUMBER;
hash = (53 * hash) + getNegative().hashCode();
}
hash = (37 * hash) + FLAGS_FIELD_NUMBER;
hash = (53 * hash) + getFlags();
if (getExemplarsCount() > 0) {
hash = (37 * hash) + EXEMPLARS_FIELD_NUMBER;
hash = (53 * hash) + getExemplarsList().hashCode();
}
if (hasMin()) {
hash = (37 * hash) + MIN_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getMin()));
}
if (hasMax()) {
hash = (37 * hash) + MAX_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getMax()));
}
hash = (37 * hash) + ZERO_THRESHOLD_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getZeroThreshold()));
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
// --- Static parse entry points. All delegate to the shared PARSER; the
// stream variants route through GeneratedMessageV3 helpers that convert
// protobuf parse failures into the appropriate exception types. ---
/** Parses a message from a ByteBuffer. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
/** Parses a message from a ByteBuffer with an extension registry. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
/** Parses a message from a ByteString. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
/** Parses a message from a ByteString with an extension registry. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
/** Parses a message from a byte array. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
/** Parses a message from a byte array with an extension registry. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
/** Parses a message by consuming an InputStream to EOF. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
/** Parses a message from an InputStream with an extension registry. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
/** Parses one length-prefixed (delimited) message from the stream. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
/** Parses one length-prefixed (delimited) message with an extension registry. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
/** Parses a message from an already-open CodedInputStream. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
/** Parses a message from a CodedInputStream with an extension registry. */
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
/** Creates a new empty builder for this message type. */
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
/** Creates a new empty builder. */
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
/** Creates a builder pre-populated with {@code prototype}'s fields. */
public static Builder newBuilder(io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
/**
 * Returns a builder initialized from this message. The default instance
 * yields a plain empty builder, skipping the mergeFrom pass since there is
 * nothing to copy.
 */
@java.lang.Override
public Builder toBuilder() {
if (this == DEFAULT_INSTANCE) {
return new Builder();
}
return new Builder().mergeFrom(this);
}
/** Creates a builder attached to {@code parent} for nested-builder change propagation. */
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
public int getAttributesCount() {
// Once a repeated-field builder exists it owns the live data; until then
// the plain list does.
if (attributesBuilder_ == null) {
return attributes_.size();
} else {
return attributesBuilder_.getCount();
}
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
public Builder setAttributes(
int index, io.opentelemetry.proto.common.v1.KeyValue value) {
if (attributesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureAttributesIsMutable();
attributes_.set(index, value);
onChanged();
} else {
attributesBuilder_.setMessage(index, value);
}
return this;
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
public Builder addAttributes(io.opentelemetry.proto.common.v1.KeyValue value) {
if (attributesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureAttributesIsMutable();
attributes_.add(value);
onChanged();
} else {
attributesBuilder_.addMessage(value);
}
return this;
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
public Builder addAttributes(
int index, io.opentelemetry.proto.common.v1.KeyValue value) {
if (attributesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureAttributesIsMutable();
attributes_.add(index, value);
onChanged();
} else {
attributesBuilder_.addMessage(index, value);
}
return this;
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
public io.opentelemetry.proto.common.v1.KeyValue.Builder getAttributesBuilder(
int index) {
// Forces creation of the repeated-field builder, switching this field into
// builder-backed mode.
return getAttributesFieldBuilder().getBuilder(index);
}
/**
 * <pre>
 * The set of key/value pairs that uniquely identify the timeseries from
 * where this point belongs. The list may be empty (may contain 0 elements).
 * Attribute keys MUST be unique (it is not allowed to have more than one
 * attribute with the same key).
 * </pre>
 *
 * <code>repeated .opentelemetry.proto.common.v1.KeyValue attributes = 1;</code>
 */
public io.opentelemetry.proto.common.v1.KeyValueOrBuilder getAttributesOrBuilder(
int index) {
if (attributesBuilder_ == null) {
return attributes_.get(index); } else {
return attributesBuilder_.getMessageOrBuilder(index);
}
}
/**
 * <pre>
 * StartTimeUnixNano is optional but strongly encouraged, see the
 * the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 start_time_unix_nano = 2;</code>
 * @return The startTimeUnixNano.
 */
@java.lang.Override
public long getStartTimeUnixNano() {
return startTimeUnixNano_;
}
/**
 * <pre>
 * StartTimeUnixNano is optional but strongly encouraged, see the
 * the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 start_time_unix_nano = 2;</code>
 * @param value The startTimeUnixNano to set.
 * @return This builder for chaining.
 */
public Builder setStartTimeUnixNano(long value) {
startTimeUnixNano_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * StartTimeUnixNano is optional but strongly encouraged, see the
 * the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 start_time_unix_nano = 2;</code>
 * @return This builder for chaining.
 */
public Builder clearStartTimeUnixNano() {
startTimeUnixNano_ = 0L;
onChanged();
return this;
}
private long timeUnixNano_ ;
/**
 * <pre>
 * TimeUnixNano is required, see the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 time_unix_nano = 3;</code>
 * @return The timeUnixNano.
 */
@java.lang.Override
public long getTimeUnixNano() {
return timeUnixNano_;
}
/**
 * <pre>
 * TimeUnixNano is required, see the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 time_unix_nano = 3;</code>
 * @param value The timeUnixNano to set.
 * @return This builder for chaining.
 */
public Builder setTimeUnixNano(long value) {
timeUnixNano_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * TimeUnixNano is required, see the detailed comments above Metric.
 * Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 * 1970.
 * </pre>
 *
 * <code>fixed64 time_unix_nano = 3;</code>
 * @return This builder for chaining.
 */
public Builder clearTimeUnixNano() {
timeUnixNano_ = 0L;
onChanged();
return this;
}
private long count_ ;
/**
 * <pre>
 * count is the number of values in the population. Must be
 * non-negative. This value must be equal to the sum of the "bucket_counts"
 * values in the positive and negative Buckets plus the "zero_count" field.
 * </pre>
 *
 * <code>fixed64 count = 4;</code>
 * @return The count.
 */
@java.lang.Override
public long getCount() {
return count_;
}
/**
 * <pre>
 * count is the number of values in the population. Must be
 * non-negative. This value must be equal to the sum of the "bucket_counts"
 * values in the positive and negative Buckets plus the "zero_count" field.
 * </pre>
 *
 * <code>fixed64 count = 4;</code>
 * @param value The count to set.
 * @return This builder for chaining.
 */
public Builder setCount(long value) {
count_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * count is the number of values in the population. Must be
 * non-negative. This value must be equal to the sum of the "bucket_counts"
 * values in the positive and negative Buckets plus the "zero_count" field.
 * </pre>
 *
 * <code>fixed64 count = 4;</code>
 * @return This builder for chaining.
 */
public Builder clearCount() {
count_ = 0L;
onChanged();
return this;
}
private double sum_ ;
/**
*
* sum of the values in the population. If count is zero then this field
* must be zero.
* Note: Sum should only be filled out when measuring non-negative discrete
* events, and is assumed to be monotonic over the values of these events.
* Negative events *can* be recorded, but sum should not be filled out when
* doing so. This is specifically to enforce compatibility w/ OpenMetrics,
* see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
*
*
* optional double sum = 5;
* @return Whether the sum field is set.
*/
@java.lang.Override
public boolean hasSum() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* sum of the values in the population. If count is zero then this field
* must be zero.
* Note: Sum should only be filled out when measuring non-negative discrete
* events, and is assumed to be monotonic over the values of these events.
* Negative events *can* be recorded, but sum should not be filled out when
* doing so. This is specifically to enforce compatibility w/ OpenMetrics,
* see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
*
*
* optional double sum = 5;
* @return The sum.
*/
@java.lang.Override
public double getSum() {
return sum_;
}
/**
 * <pre>
 * sum of the values in the population. If count is zero then this field
 * must be zero. Should only be filled out when measuring non-negative
 * discrete events (OpenMetrics compatibility).
 * </pre>
 *
 * <code>optional double sum = 5;</code>
 * @param value The sum to set.
 * @return This builder for chaining.
 */
public Builder setSum(double value) {
// Record explicit presence before storing the value.
bitField0_ |= 0x00000002;
sum_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * sum of the values in the population. If count is zero then this field
 * must be zero.
 * </pre>
 *
 * <code>optional double sum = 5;</code>
 * @return This builder for chaining.
 */
public Builder clearSum() {
// Drop the presence bit and restore the default value.
bitField0_ = (bitField0_ & ~0x00000002);
sum_ = 0D;
onChanged();
return this;
}
private int scale_ ;
/**
 * <pre>
 * scale describes the resolution of the histogram. Boundaries are
 * located at powers of the base, where base = (2^(2^-scale)).
 * The bucket identified by `index` (a signed integer) contains values
 * greater than (base^index) and less than or equal to (base^(index+1)).
 * Positive and negative ranges are expressed separately; negative values
 * are mapped by absolute value using the same scale. scale is not
 * restricted by the protocol.
 * </pre>
 *
 * <code>sint32 scale = 6;</code>
 * @return The scale.
 */
@java.lang.Override
public int getScale() {
return scale_;
}
/**
 * <pre>
 * scale describes the resolution of the histogram, where
 * base = (2^(2^-scale)). scale is not restricted by the protocol;
 * permissible values depend on the range of the data.
 * </pre>
 *
 * <code>sint32 scale = 6;</code>
 * @param value The scale to set.
 * @return This builder for chaining.
 */
public Builder setScale(int value) {
scale_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * scale describes the resolution of the histogram, where
 * base = (2^(2^-scale)).
 * </pre>
 *
 * <code>sint32 scale = 6;</code>
 * @return This builder for chaining.
 */
public Builder clearScale() {
// Reset to proto3 default (0).
scale_ = 0;
onChanged();
return this;
}
private long zeroCount_ ;
/**
 * <pre>
 * zero_count is the count of values that are either exactly zero or
 * within the region considered zero by the instrumentation at the
 * tolerated degree of precision. This bucket stores values that cannot
 * be expressed using the standard exponential formula as well as values
 * that have been rounded to zero. Implementations MAY consider the zero
 * bucket to have probability mass equal to (zero_count / count).
 * </pre>
 *
 * <code>fixed64 zero_count = 7;</code>
 * @return The zeroCount.
 */
@java.lang.Override
public long getZeroCount() {
return zeroCount_;
}
/**
 * <pre>
 * zero_count is the count of values that are either exactly zero or
 * within the region considered zero by the instrumentation at the
 * tolerated degree of precision.
 * </pre>
 *
 * <code>fixed64 zero_count = 7;</code>
 * @param value The zeroCount to set.
 * @return This builder for chaining.
 */
public Builder setZeroCount(long value) {
zeroCount_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * zero_count is the count of values that are either exactly zero or
 * within the region considered zero by the instrumentation at the
 * tolerated degree of precision.
 * </pre>
 *
 * <code>fixed64 zero_count = 7;</code>
 * @return This builder for chaining.
 */
public Builder clearZeroCount() {
// Reset to proto3 default (0).
zeroCount_ = 0L;
onChanged();
return this;
}
private double min_ ;
/**
 * <pre>
 * min is the minimum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double min = 12;</code>
 * @return Whether the min field is set.
 */
@java.lang.Override
public boolean hasMin() {
// Presence bit 0x00000008 tracks the explicit-presence proto3 field `min`.
return ((bitField0_ & 0x00000008) != 0);
}
/**
 * <pre>
 * min is the minimum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double min = 12;</code>
 * @return The min.
 */
@java.lang.Override
public double getMin() {
return min_;
}
/**
 * <pre>
 * min is the minimum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double min = 12;</code>
 * @param value The min to set.
 * @return This builder for chaining.
 */
public Builder setMin(double value) {
// Record explicit presence before storing the value.
bitField0_ |= 0x00000008;
min_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * min is the minimum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double min = 12;</code>
 * @return This builder for chaining.
 */
public Builder clearMin() {
// Drop the presence bit and restore the default value.
bitField0_ = (bitField0_ & ~0x00000008);
min_ = 0D;
onChanged();
return this;
}
private double max_ ;
/**
 * <pre>
 * max is the maximum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double max = 13;</code>
 * @return Whether the max field is set.
 */
@java.lang.Override
public boolean hasMax() {
// Presence bit 0x00000010 tracks the explicit-presence proto3 field `max`.
return ((bitField0_ & 0x00000010) != 0);
}
/**
 * <pre>
 * max is the maximum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double max = 13;</code>
 * @return The max.
 */
@java.lang.Override
public double getMax() {
return max_;
}
/**
 * <pre>
 * max is the maximum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double max = 13;</code>
 * @param value The max to set.
 * @return This builder for chaining.
 */
public Builder setMax(double value) {
// Record explicit presence before storing the value.
bitField0_ |= 0x00000010;
max_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * max is the maximum value over (start_time, end_time].
 * </pre>
 *
 * <code>optional double max = 13;</code>
 * @return This builder for chaining.
 */
public Builder clearMax() {
// Drop the presence bit and restore the default value.
bitField0_ = (bitField0_ & ~0x00000010);
max_ = 0D;
onChanged();
return this;
}
private double zeroThreshold_ ;
/**
 * <pre>
 * ZeroThreshold may be optionally set to convey the width of the zero
 * region. The zero region is defined as the closed interval
 * [-ZeroThreshold, ZeroThreshold]. When ZeroThreshold is 0, the zero
 * count bucket stores values that cannot be expressed using the standard
 * exponential formula as well as values that have been rounded to zero.
 * </pre>
 *
 * <code>double zero_threshold = 14;</code>
 * @return The zeroThreshold.
 */
@java.lang.Override
public double getZeroThreshold() {
return zeroThreshold_;
}
/**
 * <pre>
 * ZeroThreshold may be optionally set to convey the width of the zero
 * region, defined as the closed interval [-ZeroThreshold, ZeroThreshold].
 * </pre>
 *
 * <code>double zero_threshold = 14;</code>
 * @param value The zeroThreshold to set.
 * @return This builder for chaining.
 */
public Builder setZeroThreshold(double value) {
zeroThreshold_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * ZeroThreshold may be optionally set to convey the width of the zero
 * region, defined as the closed interval [-ZeroThreshold, ZeroThreshold].
 * </pre>
 *
 * <code>double zero_threshold = 14;</code>
 * @return This builder for chaining.
 */
public Builder clearZeroThreshold() {
// Reset to proto3 default (0.0).
zeroThreshold_ = 0D;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
// Delegates unknown-field handling to the GeneratedMessageV3 base builder.
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
// Delegates unknown-field merging to the GeneratedMessageV3 base builder.
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint)
}
// @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint)
// Singleton default instance with all fields at their proto3 defaults.
private static final io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint();
}
public static io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Parser singleton used by parseFrom/parseDelimitedFrom and getParserForType().
// Generic type parameter restored: the raw Parser/AbstractParser types here were
// an artifact of the angle brackets being stripped from the generated source.
private static final com.google.protobuf.Parser<ExponentialHistogramDataPoint>
PARSER = new com.google.protobuf.AbstractParser<ExponentialHistogramDataPoint>() {
@java.lang.Override
public ExponentialHistogramDataPoint parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
// Delegates to the private parsing constructor defined above.
return new ExponentialHistogramDataPoint(input, extensionRegistry);
}
};
/**
 * Static accessor for the message parser.
 * Generic return type restored (raw {@code Parser} was an artifact of
 * stripped angle brackets in the generated source).
 * @return the parser for {@code ExponentialHistogramDataPoint}.
 */
public static com.google.protobuf.Parser<ExponentialHistogramDataPoint> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<ExponentialHistogramDataPoint> getParserForType() {
// Covariant override: returns the typed parser rather than the raw Parser type.
return PARSER;
}
@java.lang.Override
public io.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint getDefaultInstanceForType() {
// Instance-level accessor required by MessageLite; returns the shared singleton.
return DEFAULT_INSTANCE;
}
}