// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/summary.proto
package org.tensorflow.framework;
/**
 * Serialization format for histogram module in
 * core/lib/histogram/histogram.h
 *
 * Protobuf type {@code tensorflow.HistogramProto}
 */
public final class HistogramProto extends
com.github.os72.protobuf351.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.HistogramProto)
HistogramProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use HistogramProto.newBuilder() to construct.
private HistogramProto(com.github.os72.protobuf351.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private HistogramProto() {
min_ = 0D;
max_ = 0D;
num_ = 0D;
sum_ = 0D;
sumSquares_ = 0D;
bucketLimit_ = java.util.Collections.emptyList();
bucket_ = java.util.Collections.emptyList();
}
@java.lang.Override
public final com.github.os72.protobuf351.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private HistogramProto(
com.github.os72.protobuf351.CodedInputStream input,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.github.os72.protobuf351.UnknownFieldSet.Builder unknownFields =
com.github.os72.protobuf351.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
case 9: {
min_ = input.readDouble();
break;
}
case 17: {
max_ = input.readDouble();
break;
}
case 25: {
num_ = input.readDouble();
break;
}
case 33: {
sum_ = input.readDouble();
break;
}
case 41: {
sumSquares_ = input.readDouble();
break;
}
case 49: {
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
bucketLimit_ = new java.util.ArrayList<java.lang.Double>();
mutable_bitField0_ |= 0x00000020;
}
bucketLimit_.add(input.readDouble());
break;
}
case 50: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020) && input.getBytesUntilLimit() > 0) {
bucketLimit_ = new java.util.ArrayList<java.lang.Double>();
mutable_bitField0_ |= 0x00000020;
}
while (input.getBytesUntilLimit() > 0) {
bucketLimit_.add(input.readDouble());
}
input.popLimit(limit);
break;
}
case 57: {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
bucket_ = new java.util.ArrayList<java.lang.Double>();
mutable_bitField0_ |= 0x00000040;
}
bucket_.add(input.readDouble());
break;
}
case 58: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) {
bucket_ = new java.util.ArrayList<java.lang.Double>();
mutable_bitField0_ |= 0x00000040;
}
while (input.getBytesUntilLimit() > 0) {
bucket_.add(input.readDouble());
}
input.popLimit(limit);
break;
}
}
}
} catch (com.github.os72.protobuf351.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.github.os72.protobuf351.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
bucketLimit_ = java.util.Collections.unmodifiableList(bucketLimit_);
}
if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
bucket_ = java.util.Collections.unmodifiableList(bucket_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
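// Illustrative sketch, not part of the generated code: each case label in the
// parsing loop above is a raw wire-format tag, computed as
// (field_number << 3) | wire_type. Wire type 1 marks a 64-bit scalar (a single
// double) and wire type 2 marks a length-delimited payload (the packed form of
// a repeated field). That is why both 49 ((6 << 3) | 1) and 50 ((6 << 3) | 2)
// decode bucket_limit, and both 57 and 58 decode bucket.
private static int exampleMakeTag(int fieldNumber, int wireType) {
// exampleMakeTag(1, 1) == 9 (min); exampleMakeTag(7, 2) == 58 (packed bucket)
return (fieldNumber << 3) | wireType;
}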
public static final com.github.os72.protobuf351.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.SummaryProtos.internal_static_tensorflow_HistogramProto_descriptor;
}
protected com.github.os72.protobuf351.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.SummaryProtos.internal_static_tensorflow_HistogramProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.HistogramProto.class, org.tensorflow.framework.HistogramProto.Builder.class);
}
private int bitField0_;
public static final int MIN_FIELD_NUMBER = 1;
private double min_;
/**
* double min = 1;
*/
public double getMin() {
return min_;
}
public static final int MAX_FIELD_NUMBER = 2;
private double max_;
/**
* double max = 2;
*/
public double getMax() {
return max_;
}
public static final int NUM_FIELD_NUMBER = 3;
private double num_;
/**
* double num = 3;
*/
public double getNum() {
return num_;
}
public static final int SUM_FIELD_NUMBER = 4;
private double sum_;
/**
* double sum = 4;
*/
public double getSum() {
return sum_;
}
public static final int SUM_SQUARES_FIELD_NUMBER = 5;
private double sumSquares_;
/**
* double sum_squares = 5;
*/
public double getSumSquares() {
return sumSquares_;
}
public static final int BUCKET_LIMIT_FIELD_NUMBER = 6;
private java.util.List<java.lang.Double> bucketLimit_;
/**
 * Parallel arrays encoding the bucket boundaries and the bucket values.
 * bucket(i) is the count for the bucket i. The range for
 * a bucket is:
 * i == 0: -DBL_MAX .. bucket_limit(0)
 * i != 0: bucket_limit(i-1) .. bucket_limit(i)
 *
 * repeated double bucket_limit = 6 [packed = true];
 */
public java.util.List<java.lang.Double>
getBucketLimitList() {
return bucketLimit_;
}
/**
 * Parallel arrays encoding the bucket boundaries and the bucket values.
 * bucket(i) is the count for the bucket i. The range for
 * a bucket is:
 * i == 0: -DBL_MAX .. bucket_limit(0)
 * i != 0: bucket_limit(i-1) .. bucket_limit(i)
 *
 * repeated double bucket_limit = 6 [packed = true];
 */
public int getBucketLimitCount() {
return bucketLimit_.size();
}
/**
*
* Parallel arrays encoding the bucket boundaries and the bucket values.
* bucket(i) is the count for the bucket i. The range for
* a bucket is:
* i == 0: -DBL_MAX .. bucket_limit(0)
* i != 0: bucket_limit(i-1) .. bucket_limit(i)
*
*
* repeated double bucket_limit = 6 [packed = true];
*/
public double getBucketLimit(int index) {
return bucketLimit_.get(index);
}
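// Illustrative sketch, not part of the generated code: given the parallel
// arrays documented above, the bucket holding a sample v is the first index
// whose upper boundary bucket_limit(i) is at least v; bucket 0 covers
// everything from -DBL_MAX up to bucket_limit(0).
private int exampleBucketIndexFor(double v) {
// assumes at least one boundary is present
for (int i = 0; i < getBucketLimitCount(); i++) {
if (v <= getBucketLimit(i)) {
return i;
}
}
return getBucketLimitCount() - 1; // v lies above the last recorded boundary
}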
private int bucketLimitMemoizedSerializedSize = -1;
public static final int BUCKET_FIELD_NUMBER = 7;
private java.util.List<java.lang.Double> bucket_;
/**
* repeated double bucket = 7 [packed = true];
*/
public java.util.List<java.lang.Double>
getBucketList() {
return bucket_;
}
/**
* repeated double bucket = 7 [packed = true];
*/
public int getBucketCount() {
return bucket_.size();
}
/**
* repeated double bucket = 7 [packed = true];
*/
public double getBucket(int index) {
return bucket_.get(index);
}
private int bucketMemoizedSerializedSize = -1;
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.github.os72.protobuf351.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (min_ != 0D) {
output.writeDouble(1, min_);
}
if (max_ != 0D) {
output.writeDouble(2, max_);
}
if (num_ != 0D) {
output.writeDouble(3, num_);
}
if (sum_ != 0D) {
output.writeDouble(4, sum_);
}
if (sumSquares_ != 0D) {
output.writeDouble(5, sumSquares_);
}
if (getBucketLimitList().size() > 0) {
output.writeUInt32NoTag(50);
output.writeUInt32NoTag(bucketLimitMemoizedSerializedSize);
}
for (int i = 0; i < bucketLimit_.size(); i++) {
output.writeDoubleNoTag(bucketLimit_.get(i));
}
if (getBucketList().size() > 0) {
output.writeUInt32NoTag(58);
output.writeUInt32NoTag(bucketMemoizedSerializedSize);
}
for (int i = 0; i < bucket_.size(); i++) {
output.writeDoubleNoTag(bucket_.get(i));
}
unknownFields.writeTo(output);
}
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (min_ != 0D) {
size += com.github.os72.protobuf351.CodedOutputStream
.computeDoubleSize(1, min_);
}
if (max_ != 0D) {
size += com.github.os72.protobuf351.CodedOutputStream
.computeDoubleSize(2, max_);
}
if (num_ != 0D) {
size += com.github.os72.protobuf351.CodedOutputStream
.computeDoubleSize(3, num_);
}
if (sum_ != 0D) {
size += com.github.os72.protobuf351.CodedOutputStream
.computeDoubleSize(4, sum_);
}
if (sumSquares_ != 0D) {
size += com.github.os72.protobuf351.CodedOutputStream
.computeDoubleSize(5, sumSquares_);
}
{
int dataSize = 0;
dataSize = 8 * getBucketLimitList().size();
size += dataSize;
if (!getBucketLimitList().isEmpty()) {
size += 1;
size += com.github.os72.protobuf351.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
bucketLimitMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
dataSize = 8 * getBucketList().size();
size += dataSize;
if (!getBucketList().isEmpty()) {
size += 1;
size += com.github.os72.protobuf351.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
bucketMemoizedSerializedSize = dataSize;
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
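// Illustrative sketch, not part of the generated code: a non-empty packed
// repeated double field costs one tag byte, a varint holding the payload
// length in bytes, and eight bytes per element, which is exactly what the
// two blocks above compute for bucket_limit and bucket.
private static int examplePackedDoubleFieldSize(int elementCount) {
if (elementCount == 0) {
return 0; // empty packed fields are omitted from the wire format entirely
}
int dataSize = 8 * elementCount;
return 1 // one-byte tag, e.g. 50 for bucket_limit
+ com.github.os72.protobuf351.CodedOutputStream.computeInt32SizeNoTag(dataSize)
+ dataSize;
}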
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.HistogramProto)) {
return super.equals(obj);
}
org.tensorflow.framework.HistogramProto other = (org.tensorflow.framework.HistogramProto) obj;
boolean result = true;
result = result && (
java.lang.Double.doubleToLongBits(getMin())
== java.lang.Double.doubleToLongBits(
other.getMin()));
result = result && (
java.lang.Double.doubleToLongBits(getMax())
== java.lang.Double.doubleToLongBits(
other.getMax()));
result = result && (
java.lang.Double.doubleToLongBits(getNum())
== java.lang.Double.doubleToLongBits(
other.getNum()));
result = result && (
java.lang.Double.doubleToLongBits(getSum())
== java.lang.Double.doubleToLongBits(
other.getSum()));
result = result && (
java.lang.Double.doubleToLongBits(getSumSquares())
== java.lang.Double.doubleToLongBits(
other.getSumSquares()));
result = result && getBucketLimitList()
.equals(other.getBucketLimitList());
result = result && getBucketList()
.equals(other.getBucketList());
result = result && unknownFields.equals(other.unknownFields);
return result;
}
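// Illustrative note, not part of the generated code: equals() compares raw
// bit patterns via doubleToLongBits, so NaN compares equal to NaN and 0.0
// does not compare equal to -0.0, unlike the == operator on doubles.
private static boolean exampleBitwiseDoubleEquals(double a, double b) {
// exampleBitwiseDoubleEquals(Double.NaN, Double.NaN) is true
// exampleBitwiseDoubleEquals(0.0, -0.0) is false
return java.lang.Double.doubleToLongBits(a)
== java.lang.Double.doubleToLongBits(b);
}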
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + MIN_FIELD_NUMBER;
hash = (53 * hash) + com.github.os72.protobuf351.Internal.hashLong(
java.lang.Double.doubleToLongBits(getMin()));
hash = (37 * hash) + MAX_FIELD_NUMBER;
hash = (53 * hash) + com.github.os72.protobuf351.Internal.hashLong(
java.lang.Double.doubleToLongBits(getMax()));
hash = (37 * hash) + NUM_FIELD_NUMBER;
hash = (53 * hash) + com.github.os72.protobuf351.Internal.hashLong(
java.lang.Double.doubleToLongBits(getNum()));
hash = (37 * hash) + SUM_FIELD_NUMBER;
hash = (53 * hash) + com.github.os72.protobuf351.Internal.hashLong(
java.lang.Double.doubleToLongBits(getSum()));
hash = (37 * hash) + SUM_SQUARES_FIELD_NUMBER;
hash = (53 * hash) + com.github.os72.protobuf351.Internal.hashLong(
java.lang.Double.doubleToLongBits(getSumSquares()));
if (getBucketLimitCount() > 0) {
hash = (37 * hash) + BUCKET_LIMIT_FIELD_NUMBER;
hash = (53 * hash) + getBucketLimitList().hashCode();
}
if (getBucketCount() > 0) {
hash = (37 * hash) + BUCKET_FIELD_NUMBER;
hash = (53 * hash) + getBucketList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.HistogramProto parseFrom(
java.nio.ByteBuffer data)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
java.nio.ByteBuffer data,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
com.github.os72.protobuf351.ByteString data)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
com.github.os72.protobuf351.ByteString data,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.HistogramProto parseFrom(byte[] data)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
byte[] data,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.HistogramProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.github.os72.protobuf351.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
java.io.InputStream input,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.github.os72.protobuf351.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.HistogramProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.github.os72.protobuf351.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.HistogramProto parseDelimitedFrom(
java.io.InputStream input,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.github.os72.protobuf351.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
com.github.os72.protobuf351.CodedInputStream input)
throws java.io.IOException {
return com.github.os72.protobuf351.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.HistogramProto parseFrom(
com.github.os72.protobuf351.CodedInputStream input,
com.github.os72.protobuf351.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.github.os72.protobuf351.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.HistogramProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
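// Illustrative sketch, not part of the generated code: a typical round trip
// through the builder factories above and the parseFrom overloads earlier in
// this class. The setters and add methods assume the standard generated
// builder naming for this message's fields. Counts describe three samples
// (1, 5, 9): two fall at or below the boundary 5.0, one above it.
private static org.tensorflow.framework.HistogramProto exampleRoundTrip()
throws com.github.os72.protobuf351.InvalidProtocolBufferException {
org.tensorflow.framework.HistogramProto original = newBuilder()
.setMin(1.0)
.setMax(9.0)
.setNum(3.0)
.setSum(15.0)
.setSumSquares(107.0)
.addBucketLimit(5.0)
.addBucketLimit(10.0)
.addBucket(2.0)
.addBucket(1.0)
.build();
byte[] wire = original.toByteArray();
return parseFrom(wire); // equals(original) holds for the parsed copy
}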
@java.lang.Override
protected Builder newBuilderForType(
com.github.os72.protobuf351.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Serialization format for histogram module in
 * core/lib/histogram/histogram.h
 *
 * Protobuf type {@code tensorflow.HistogramProto}
 */
public static final class Builder extends
com.github.os72.protobuf351.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.HistogramProto)
HistogramProtoOrBuilder {
/**
 * Parallel arrays encoding the bucket boundaries and the bucket values.
 * bucket(i) is the count for the bucket i. The range for
 * a bucket is:
 * i == 0: -DBL_MAX .. bucket_limit(0)
 * i != 0: bucket_limit(i-1) .. bucket_limit(i)
 *
 * repeated double bucket_limit = 6 [packed = true];
 */
public int getBucketLimitCount() {
return bucketLimit_.size();
}