// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: opencensus/proto/stats/v1/stats.proto

// Protobuf Java Version: 3.25.1
package io.opencensus.proto.stats.v1;

/**
 * Protobuf type {@code opencensus.proto.stats.v1.DistributionAggregation}
 */
public final class DistributionAggregation extends
    com.google.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:opencensus.proto.stats.v1.DistributionAggregation)
    DistributionAggregationOrBuilder {
private static final long serialVersionUID = 0L;
  // Use DistributionAggregation.newBuilder() to construct.
  private DistributionAggregation(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }
  private DistributionAggregation() {
    bucketBounds_ = emptyDoubleList();
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(
      UnusedPrivateParameter unused) {
    return new DistributionAggregation();
  }

  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            io.opencensus.proto.stats.v1.DistributionAggregation.class, io.opencensus.proto.stats.v1.DistributionAggregation.Builder.class);
  }

  public static final int BUCKET_BOUNDS_FIELD_NUMBER = 1;
  @SuppressWarnings("serial")
  private com.google.protobuf.Internal.DoubleList bucketBounds_ =
      emptyDoubleList();
  /**
   * <pre>
   * A Distribution may optionally contain a histogram of the values in the
   * population. The bucket boundaries for that histogram are described by
   * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
   * buckets. The boundaries for bucket index i are:
   *
   * (-infinity, bucket_bounds[i]) for i == 0
   * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
   * [bucket_bounds[i-1], +infinity) for i == N-1
   *
   * i.e. an underflow bucket (number 0), zero or more finite buckets (1
   * through N - 2, and an overflow bucket (N - 1), with inclusive lower
   * bounds and exclusive upper bounds.
   *
   * If `bucket_bounds` has no elements (zero size), then there is no
   * histogram associated with the Distribution. If `bucket_bounds` has only
   * one element, there are no finite buckets, and that single element is the
   * common boundary of the overflow and underflow buckets. The values must
   * be monotonically increasing.
   * </pre>
   *
   * <code>repeated double bucket_bounds = 1;</code>
   * @return A list containing the bucketBounds.
   */
  @java.lang.Override
  public java.util.List<java.lang.Double>
      getBucketBoundsList() {
    return bucketBounds_;
  }
  /**
   * <pre>
   * A Distribution may optionally contain a histogram of the values in the
   * population. The bucket boundaries for that histogram are described by
   * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
   * buckets. The boundaries for bucket index i are:
   *
   * (-infinity, bucket_bounds[i]) for i == 0
   * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
   * [bucket_bounds[i-1], +infinity) for i == N-1
   *
   * i.e. an underflow bucket (number 0), zero or more finite buckets (1
   * through N - 2, and an overflow bucket (N - 1), with inclusive lower
   * bounds and exclusive upper bounds.
   *
   * If `bucket_bounds` has no elements (zero size), then there is no
   * histogram associated with the Distribution. If `bucket_bounds` has only
   * one element, there are no finite buckets, and that single element is the
   * common boundary of the overflow and underflow buckets. The values must
   * be monotonically increasing.
   * </pre>
   *
   * <code>repeated double bucket_bounds = 1;</code>
   * @return The count of bucketBounds.
   */
  public int getBucketBoundsCount() {
    return bucketBounds_.size();
  }
  /**
   * <pre>
   * A Distribution may optionally contain a histogram of the values in the
   * population. The bucket boundaries for that histogram are described by
   * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
   * buckets. The boundaries for bucket index i are:
   *
   * (-infinity, bucket_bounds[i]) for i == 0
   * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
   * [bucket_bounds[i-1], +infinity) for i == N-1
   *
   * i.e. an underflow bucket (number 0), zero or more finite buckets (1
   * through N - 2, and an overflow bucket (N - 1), with inclusive lower
   * bounds and exclusive upper bounds.
   *
   * If `bucket_bounds` has no elements (zero size), then there is no
   * histogram associated with the Distribution. If `bucket_bounds` has only
   * one element, there are no finite buckets, and that single element is the
   * common boundary of the overflow and underflow buckets. The values must
   * be monotonically increasing.
   * </pre>
   *
   * <code>repeated double bucket_bounds = 1;</code>
   * @param index The index of the element to return.
   * @return The bucketBounds at the given index.
   */
  public double getBucketBounds(int index) {
    return bucketBounds_.getDouble(index);
  }

  private int bucketBoundsMemoizedSerializedSize = -1;

  private byte memoizedIsInitialized = -1;
  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    getSerializedSize();
    if (getBucketBoundsList().size() > 0) {
      output.writeUInt32NoTag(10);
      output.writeUInt32NoTag(bucketBoundsMemoizedSerializedSize);
    }
    for (int i = 0; i < bucketBounds_.size(); i++) {
      output.writeDoubleNoTag(bucketBounds_.getDouble(i));
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    {
      int dataSize = 0;
      dataSize = 8 * getBucketBoundsList().size();
      size += dataSize;
      if (!getBucketBoundsList().isEmpty()) {
        size += 1;
        size += com.google.protobuf.CodedOutputStream
            .computeInt32SizeNoTag(dataSize);
      }
      bucketBoundsMemoizedSerializedSize = dataSize;
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof io.opencensus.proto.stats.v1.DistributionAggregation)) {
      return super.equals(obj);
    }
    io.opencensus.proto.stats.v1.DistributionAggregation other = (io.opencensus.proto.stats.v1.DistributionAggregation) obj;

    if (!getBucketBoundsList()
        .equals(other.getBucketBoundsList())) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (getBucketBoundsCount() > 0) {
      hash = (37 * hash) + BUCKET_BOUNDS_FIELD_NUMBER;
      hash = (53 * hash) + getBucketBoundsList().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      java.nio.ByteBuffer data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      byte[] data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseDelimitedFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
  public static Builder newBuilder(io.opencensus.proto.stats.v1.DistributionAggregation prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }
  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE
        ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * Protobuf type {@code opencensus.proto.stats.v1.DistributionAggregation}
   */
  public static final class Builder extends
      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:opencensus.proto.stats.v1.DistributionAggregation)
      io.opencensus.proto.stats.v1.DistributionAggregationOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              io.opencensus.proto.stats.v1.DistributionAggregation.class, io.opencensus.proto.stats.v1.DistributionAggregation.Builder.class);
    }

    // Construct using io.opencensus.proto.stats.v1.DistributionAggregation.newBuilder()
    private Builder() {

    }

    private Builder(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);

    }
    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      bucketBounds_ = emptyDoubleList();
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_descriptor;
    }

    @java.lang.Override
    public io.opencensus.proto.stats.v1.DistributionAggregation getDefaultInstanceForType() {
      return io.opencensus.proto.stats.v1.DistributionAggregation.getDefaultInstance();
    }

    @java.lang.Override
    public io.opencensus.proto.stats.v1.DistributionAggregation build() {
      io.opencensus.proto.stats.v1.DistributionAggregation result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public io.opencensus.proto.stats.v1.DistributionAggregation buildPartial() {
      io.opencensus.proto.stats.v1.DistributionAggregation result = new io.opencensus.proto.stats.v1.DistributionAggregation(this);
      if (bitField0_ != 0) { buildPartial0(result); }
      onBuilt();
      return result;
    }

    private void buildPartial0(io.opencensus.proto.stats.v1.DistributionAggregation result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        bucketBounds_.makeImmutable();
        result.bucketBounds_ = bucketBounds_;
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof io.opencensus.proto.stats.v1.DistributionAggregation) {
        return mergeFrom((io.opencensus.proto.stats.v1.DistributionAggregation)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(io.opencensus.proto.stats.v1.DistributionAggregation other) {
      if (other == io.opencensus.proto.stats.v1.DistributionAggregation.getDefaultInstance()) return this;
      if (!other.bucketBounds_.isEmpty()) {
        if (bucketBounds_.isEmpty()) {
          bucketBounds_ = other.bucketBounds_;
          bucketBounds_.makeImmutable();
          bitField0_ |= 0x00000001;
        } else {
          ensureBucketBoundsIsMutable();
          bucketBounds_.addAll(other.bucketBounds_);
        }
        onChanged();
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 9: {
              double v = input.readDouble();
              ensureBucketBoundsIsMutable();
              bucketBounds_.addDouble(v);
              break;
            } // case 9
            case 10: {
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              int alloc = length > 4096 ? 4096 : length;
              ensureBucketBoundsIsMutable(alloc / 8);
              while (input.getBytesUntilLimit() > 0) {
                bucketBounds_.addDouble(input.readDouble());
              }
              input.popLimit(limit);
              break;
            } // case 10
            default: {
              if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                done = true; // was an endgroup tag
              }
              break;
            } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }
    private int bitField0_;

    private com.google.protobuf.Internal.DoubleList bucketBounds_ = emptyDoubleList();
    private void ensureBucketBoundsIsMutable() {
      if (!bucketBounds_.isModifiable()) {
        bucketBounds_ = makeMutableCopy(bucketBounds_);
      }
      bitField0_ |= 0x00000001;
    }
    private void ensureBucketBoundsIsMutable(int capacity) {
      if (!bucketBounds_.isModifiable()) {
        bucketBounds_ = makeMutableCopy(bucketBounds_, capacity);
      }
      bitField0_ |= 0x00000001;
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @return A list containing the bucketBounds.
     */
    public java.util.List<java.lang.Double>
        getBucketBoundsList() {
      bucketBounds_.makeImmutable();
      return bucketBounds_;
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @return The count of bucketBounds.
     */
    public int getBucketBoundsCount() {
      return bucketBounds_.size();
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @param index The index of the element to return.
     * @return The bucketBounds at the given index.
     */
    public double getBucketBounds(int index) {
      return bucketBounds_.getDouble(index);
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @param index The index to set the value at.
     * @param value The bucketBounds to set.
     * @return This builder for chaining.
     */
    public Builder setBucketBounds(
        int index, double value) {
      ensureBucketBoundsIsMutable();
      bucketBounds_.setDouble(index, value);
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @param value The bucketBounds to add.
     * @return This builder for chaining.
     */
    public Builder addBucketBounds(double value) {
      ensureBucketBoundsIsMutable();
      bucketBounds_.addDouble(value);
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @param values The bucketBounds to add.
     * @return This builder for chaining.
     */
    public Builder addAllBucketBounds(
        java.lang.Iterable<? extends java.lang.Double> values) {
      ensureBucketBoundsIsMutable();
      com.google.protobuf.AbstractMessageLite.Builder.addAll(
          values, bucketBounds_);
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     * <pre>
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     *
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     *
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2, and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     *
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * </pre>
     *
     * <code>repeated double bucket_bounds = 1;</code>
     * @return This builder for chaining.
     */
    public Builder clearBucketBounds() {
      bucketBounds_ = emptyDoubleList();
      bitField0_ = (bitField0_ & ~0x00000001);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }


    // @@protoc_insertion_point(builder_scope:opencensus.proto.stats.v1.DistributionAggregation)
  }

  // @@protoc_insertion_point(class_scope:opencensus.proto.stats.v1.DistributionAggregation)
  private static final io.opencensus.proto.stats.v1.DistributionAggregation DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new io.opencensus.proto.stats.v1.DistributionAggregation();
  }

  public static io.opencensus.proto.stats.v1.DistributionAggregation getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<DistributionAggregation>
      PARSER = new com.google.protobuf.AbstractParser<DistributionAggregation>() {
    @java.lang.Override
    public DistributionAggregation parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      Builder builder = newBuilder();
      try {
        builder.mergeFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(builder.buildPartial());
      } catch (com.google.protobuf.UninitializedMessageException e) {
        throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(e)
            .setUnfinishedMessage(builder.buildPartial());
      }
      return builder.buildPartial();
    }
  };

  public static com.google.protobuf.Parser<DistributionAggregation> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<DistributionAggregation> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public io.opencensus.proto.stats.v1.DistributionAggregation getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

}
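// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated file). The class below is an
// illustrative addition: it assumes only the generated API shown above
// (newBuilder(), addBucketBounds(), build(), toByteArray(), parseFrom()) plus
// a hypothetical bucketIndex() helper that simply codifies the boundary rules
// from the bucket_bounds Javadoc: inclusive lower bounds, exclusive upper
// bounds, with an underflow bucket 0 and an overflow bucket N-1. The bound
// values are made up for the example.
final class DistributionAggregationUsageExample {

  // Returns the bucket a value falls into: 0 is the underflow bucket,
  // getBucketBoundsCount() is the overflow bucket.
  static int bucketIndex(DistributionAggregation agg, double value) {
    int i = 0;
    while (i < agg.getBucketBoundsCount() && value >= agg.getBucketBounds(i)) {
      i++;
    }
    return i;
  }

  public static void main(String[] args) throws Exception {
    // Three bounds define size(bucket_bounds) + 1 = 4 buckets:
    // (-inf, 0.0), [0.0, 10.0), [10.0, 100.0), [100.0, +inf)
    DistributionAggregation histogram =
        DistributionAggregation.newBuilder()
            .addBucketBounds(0.0)
            .addBucketBounds(10.0)
            .addBucketBounds(100.0)
            .build();

    // Round-trip through the wire format using the generated parser.
    byte[] wire = histogram.toByteArray();
    DistributionAggregation parsed = DistributionAggregation.parseFrom(wire);

    System.out.println(parsed.getBucketBoundsList()); // [0.0, 10.0, 100.0]
    System.out.println(bucketIndex(parsed, -1.0));    // 0 (underflow bucket)
    System.out.println(bucketIndex(parsed, 10.0));    // 2, i.e. [10.0, 100.0)
    System.out.println(bucketIndex(parsed, 250.0));   // 3 (overflow bucket)
  }
}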



