
io.opencensus.proto.stats.v1.DistributionAggregation Maven / Gradle / Ivy

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: opencensus/proto/stats/v1/stats.proto

package io.opencensus.proto.stats.v1;

/**
 * Protobuf type {@code opencensus.proto.stats.v1.DistributionAggregation}
 */
public final class DistributionAggregation extends
    com.google.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:opencensus.proto.stats.v1.DistributionAggregation)
    DistributionAggregationOrBuilder {
private static final long serialVersionUID = 0L;
  // Use DistributionAggregation.newBuilder() to construct.
  private DistributionAggregation(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }
  private DistributionAggregation() {
    bucketBounds_ = emptyDoubleList();
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(
      UnusedPrivateParameter unused) {
    return new DistributionAggregation();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet
  getUnknownFields() {
    return this.unknownFields;
  }
  private DistributionAggregation(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    int mutable_bitField0_ = 0;
    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
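          // Tag 9 = field 1 (bucket_bounds) with wire type 1 (fixed64): one unpacked double value.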
          case 9: {
            if (!((mutable_bitField0_ & 0x00000001) != 0)) {
              bucketBounds_ = newDoubleList();
              mutable_bitField0_ |= 0x00000001;
            }
            bucketBounds_.addDouble(input.readDouble());
            break;
          }
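          // Tag 10 = field 1 (bucket_bounds) with wire type 2 (length-delimited): packed doubles.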
          case 10: {
            int length = input.readRawVarint32();
            int limit = input.pushLimit(length);
            if (!((mutable_bitField0_ & 0x00000001) != 0) && input.getBytesUntilLimit() > 0) {
              bucketBounds_ = newDoubleList();
              mutable_bitField0_ |= 0x00000001;
            }
            while (input.getBytesUntilLimit() > 0) {
              bucketBounds_.addDouble(input.readDouble());
            }
            input.popLimit(limit);
            break;
          }
          default: {
            if (!parseUnknownField(
                input, unknownFields, extensionRegistry, tag)) {
              done = true;
            }
            break;
          }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(
          e).setUnfinishedMessage(this);
    } finally {
      if (((mutable_bitField0_ & 0x00000001) != 0)) {
        bucketBounds_.makeImmutable(); // C
      }
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            io.opencensus.proto.stats.v1.DistributionAggregation.class, io.opencensus.proto.stats.v1.DistributionAggregation.Builder.class);
  }

  public static final int BUCKET_BOUNDS_FIELD_NUMBER = 1;
  private com.google.protobuf.Internal.DoubleList bucketBounds_;
  /**
   * 
   * A Distribution may optionally contain a histogram of the values in the
   * population. The bucket boundaries for that histogram are described by
   * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
   * buckets. The boundaries for bucket index i are:
   * (-infinity, bucket_bounds[i]) for i == 0
   * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
   * [bucket_bounds[i-1], +infinity) for i == N-1
   * i.e. an underflow bucket (number 0), zero or more finite buckets (1
   * through N - 2), and an overflow bucket (N - 1), with inclusive lower
   * bounds and exclusive upper bounds.
   * If `bucket_bounds` has no elements (zero size), then there is no
   * histogram associated with the Distribution. If `bucket_bounds` has only
   * one element, there are no finite buckets, and that single element is the
   * common boundary of the overflow and underflow buckets. The values must
   * be monotonically increasing.
   * 
   *
   * repeated double bucket_bounds = 1;
   * @return A list containing the bucketBounds.
   */
  @java.lang.Override
  public java.util.List<java.lang.Double>
      getBucketBoundsList() {
    return bucketBounds_;
  }
  /**
   *
   * A Distribution may optionally contain a histogram of the values in the
   * population. The bucket boundaries for that histogram are described by
   * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
   * buckets. The boundaries for bucket index i are:
   * (-infinity, bucket_bounds[i]) for i == 0
   * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
   * [bucket_bounds[i-1], +infinity) for i == N-1
   * i.e. an underflow bucket (number 0), zero or more finite buckets (1
   * through N - 2), and an overflow bucket (N - 1), with inclusive lower
   * bounds and exclusive upper bounds.
   * If `bucket_bounds` has no elements (zero size), then there is no
   * histogram associated with the Distribution. If `bucket_bounds` has only
   * one element, there are no finite buckets, and that single element is the
   * common boundary of the overflow and underflow buckets. The values must
   * be monotonically increasing.
   * 
   *
   * repeated double bucket_bounds = 1;
   * @return The count of bucketBounds.
   */
  public int getBucketBoundsCount() {
    return bucketBounds_.size();
  }
  /**
   *
   * A Distribution may optionally contain a histogram of the values in the
   * population. The bucket boundaries for that histogram are described by
   * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
   * buckets. The boundaries for bucket index i are:
   * (-infinity, bucket_bounds[i]) for i == 0
   * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
   * [bucket_bounds[i-1], +infinity) for i == N-1
   * i.e. an underflow bucket (number 0), zero or more finite buckets (1
   * through N - 2), and an overflow bucket (N - 1), with inclusive lower
   * bounds and exclusive upper bounds.
   * If `bucket_bounds` has no elements (zero size), then there is no
   * histogram associated with the Distribution. If `bucket_bounds` has only
   * one element, there are no finite buckets, and that single element is the
   * common boundary of the overflow and underflow buckets. The values must
   * be monotonically increasing.
   * 
   *
   * repeated double bucket_bounds = 1;
   * @param index The index of the element to return.
   * @return The bucketBounds at the given index.
   */
  public double getBucketBounds(int index) {
    return bucketBounds_.getDouble(index);
  }

  private int bucketBoundsMemoizedSerializedSize = -1;

  private byte memoizedIsInitialized = -1;
  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    getSerializedSize();
    if (getBucketBoundsList().size() > 0) {
      output.writeUInt32NoTag(10);
      output.writeUInt32NoTag(bucketBoundsMemoizedSerializedSize);
    }
    for (int i = 0; i < bucketBounds_.size(); i++) {
      output.writeDoubleNoTag(bucketBounds_.getDouble(i));
    }
    unknownFields.writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    {
      int dataSize = 0;
      dataSize = 8 * getBucketBoundsList().size();
      size += dataSize;
      if (!getBucketBoundsList().isEmpty()) {
        size += 1;
        size += com.google.protobuf.CodedOutputStream
            .computeInt32SizeNoTag(dataSize);
      }
      bucketBoundsMemoizedSerializedSize = dataSize;
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof io.opencensus.proto.stats.v1.DistributionAggregation)) {
      return super.equals(obj);
    }
    io.opencensus.proto.stats.v1.DistributionAggregation other = (io.opencensus.proto.stats.v1.DistributionAggregation) obj;

    if (!getBucketBoundsList()
        .equals(other.getBucketBoundsList())) return false;
    if (!unknownFields.equals(other.unknownFields)) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (getBucketBoundsCount() > 0) {
      hash = (37 * hash) + BUCKET_BOUNDS_FIELD_NUMBER;
      hash = (53 * hash) + getBucketBoundsList().hashCode();
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      java.nio.ByteBuffer data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      byte[] data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseDelimitedFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static io.opencensus.proto.stats.v1.DistributionAggregation parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
  public static Builder newBuilder(io.opencensus.proto.stats.v1.DistributionAggregation prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }
  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ?
        new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * Protobuf type {@code opencensus.proto.stats.v1.DistributionAggregation}
   */
  public static final class Builder extends
      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:opencensus.proto.stats.v1.DistributionAggregation)
      io.opencensus.proto.stats.v1.DistributionAggregationOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              io.opencensus.proto.stats.v1.DistributionAggregation.class, io.opencensus.proto.stats.v1.DistributionAggregation.Builder.class);
    }

    // Construct using io.opencensus.proto.stats.v1.DistributionAggregation.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
      }
    }
    @java.lang.Override
    public Builder clear() {
      super.clear();
      bucketBounds_ = emptyDoubleList();
      bitField0_ = (bitField0_ & ~0x00000001);
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return io.opencensus.proto.stats.v1.StatsProto.internal_static_opencensus_proto_stats_v1_DistributionAggregation_descriptor;
    }

    @java.lang.Override
    public io.opencensus.proto.stats.v1.DistributionAggregation getDefaultInstanceForType() {
      return io.opencensus.proto.stats.v1.DistributionAggregation.getDefaultInstance();
    }

    @java.lang.Override
    public io.opencensus.proto.stats.v1.DistributionAggregation build() {
      io.opencensus.proto.stats.v1.DistributionAggregation result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public io.opencensus.proto.stats.v1.DistributionAggregation buildPartial() {
      io.opencensus.proto.stats.v1.DistributionAggregation result = new io.opencensus.proto.stats.v1.DistributionAggregation(this);
      int from_bitField0_ = bitField0_;
      if (((bitField0_ & 0x00000001) != 0)) {
        bucketBounds_.makeImmutable();
        bitField0_ = (bitField0_ & ~0x00000001);
      }
      result.bucketBounds_ = bucketBounds_;
      onBuilt();
      return result;
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof io.opencensus.proto.stats.v1.DistributionAggregation) {
        return mergeFrom((io.opencensus.proto.stats.v1.DistributionAggregation)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(io.opencensus.proto.stats.v1.DistributionAggregation other) {
      if (other == io.opencensus.proto.stats.v1.DistributionAggregation.getDefaultInstance()) return this;
      if (!other.bucketBounds_.isEmpty()) {
        if (bucketBounds_.isEmpty()) {
          bucketBounds_ = other.bucketBounds_;
          bitField0_ = (bitField0_ & ~0x00000001);
        } else {
          ensureBucketBoundsIsMutable();
          bucketBounds_.addAll(other.bucketBounds_);
        }
        onChanged();
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      io.opencensus.proto.stats.v1.DistributionAggregation parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (io.opencensus.proto.stats.v1.DistributionAggregation) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }
    private int bitField0_;

    private com.google.protobuf.Internal.DoubleList bucketBounds_ = emptyDoubleList();
    private void ensureBucketBoundsIsMutable() {
      if (!((bitField0_ & 0x00000001) != 0)) {
        bucketBounds_ = mutableCopy(bucketBounds_);
        bitField0_ |= 0x00000001;
      }
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @return A list containing the bucketBounds.
     */
    public java.util.List<java.lang.Double>
        getBucketBoundsList() {
      return ((bitField0_ & 0x00000001) != 0) ?
               java.util.Collections.unmodifiableList(bucketBounds_) : bucketBounds_;
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @return The count of bucketBounds.
     */
    public int getBucketBoundsCount() {
      return bucketBounds_.size();
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @param index The index of the element to return.
     * @return The bucketBounds at the given index.
     */
    public double getBucketBounds(int index) {
      return bucketBounds_.getDouble(index);
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @param index The index to set the value at.
     * @param value The bucketBounds to set.
     * @return This builder for chaining.
     */
    public Builder setBucketBounds(
        int index, double value) {
      ensureBucketBoundsIsMutable();
      bucketBounds_.setDouble(index, value);
      onChanged();
      return this;
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @param value The bucketBounds to add.
     * @return This builder for chaining.
     */
    public Builder addBucketBounds(double value) {
      ensureBucketBoundsIsMutable();
      bucketBounds_.addDouble(value);
      onChanged();
      return this;
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @param values The bucketBounds to add.
     * @return This builder for chaining.
     */
    public Builder addAllBucketBounds(
        java.lang.Iterable<? extends java.lang.Double> values) {
      ensureBucketBoundsIsMutable();
      com.google.protobuf.AbstractMessageLite.Builder.addAll(
          values, bucketBounds_);
      onChanged();
      return this;
    }
    /**
     *
     * A Distribution may optionally contain a histogram of the values in the
     * population. The bucket boundaries for that histogram are described by
     * `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N)
     * buckets. The boundaries for bucket index i are:
     * (-infinity, bucket_bounds[i]) for i == 0
     * [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2
     * [bucket_bounds[i-1], +infinity) for i == N-1
     * i.e. an underflow bucket (number 0), zero or more finite buckets (1
     * through N - 2), and an overflow bucket (N - 1), with inclusive lower
     * bounds and exclusive upper bounds.
     * If `bucket_bounds` has no elements (zero size), then there is no
     * histogram associated with the Distribution. If `bucket_bounds` has only
     * one element, there are no finite buckets, and that single element is the
     * common boundary of the overflow and underflow buckets. The values must
     * be monotonically increasing.
     * 
     *
     * repeated double bucket_bounds = 1;
     * @return This builder for chaining.
     */
    public Builder clearBucketBounds() {
      bucketBounds_ = emptyDoubleList();
      bitField0_ = (bitField0_ & ~0x00000001);
      onChanged();
      return this;
    }
    @java.lang.Override
    public final Builder setUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }


    // @@protoc_insertion_point(builder_scope:opencensus.proto.stats.v1.DistributionAggregation)
  }

  // @@protoc_insertion_point(class_scope:opencensus.proto.stats.v1.DistributionAggregation)
  private static final io.opencensus.proto.stats.v1.DistributionAggregation DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new io.opencensus.proto.stats.v1.DistributionAggregation();
  }

  public static io.opencensus.proto.stats.v1.DistributionAggregation getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<DistributionAggregation>
      PARSER = new com.google.protobuf.AbstractParser<DistributionAggregation>() {
    @java.lang.Override
    public DistributionAggregation parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return new DistributionAggregation(input, extensionRegistry);
    }
  };

  public static com.google.protobuf.Parser<DistributionAggregation> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<DistributionAggregation> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public io.opencensus.proto.stats.v1.DistributionAggregation getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

}
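
Usage illustration (not part of the generated source): a minimal sketch of building a DistributionAggregation through the generated Builder, round-tripping it through the wire format, and mapping a sample value to a bucket index using the boundary rules documented above. The class name, bucket bounds, and sample value are arbitrary examples.

// DistributionAggregationExample.java -- illustrative sketch only.
import io.opencensus.proto.stats.v1.DistributionAggregation;

public final class DistributionAggregationExample {
  public static void main(String[] args) throws Exception {
    // Three boundaries define size(bucket_bounds) + 1 = 4 buckets:
    // (-inf, 0.0), [0.0, 10.0), [10.0, 100.0), [100.0, +inf)
    DistributionAggregation aggregation = DistributionAggregation.newBuilder()
        .addBucketBounds(0.0)
        .addBucketBounds(10.0)
        .addBucketBounds(100.0)
        .build();

    // Round-trip through the wire format; field 1 is written packed (tag 10).
    byte[] bytes = aggregation.toByteArray();
    DistributionAggregation parsed = DistributionAggregation.parseFrom(bytes);

    // Apply the documented boundary rules to find the bucket for a value:
    // bucket i covers [bucket_bounds[i-1], bucket_bounds[i]), with open-ended
    // underflow and overflow buckets at the two ends.
    double value = 42.0;
    int bucket = 0;
    while (bucket < parsed.getBucketBoundsCount()
        && value >= parsed.getBucketBounds(bucket)) {
      bucket++;
    }
    System.out.println("value " + value + " is in bucket " + bucket); // bucket 2: [10.0, 100.0)
  }
}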



