org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos (hadoop-apache)
Shaded version of Apache Hadoop for Presto
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: HdfsServer.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class HdfsServerProtos {
private HdfsServerProtos() {}
public static void registerAllExtensions(
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistry registry) {
}
/**
 * Protobuf enum {@code hadoop.hdfs.ReplicaStateProto}
 *
 * <pre>
 * State of a block replica at a datanode
 * </pre>
 */
public enum ReplicaStateProto
implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum {
/**
* FINALIZED = 0;
*
*
* State of a replica when it is not modified
*
*/
FINALIZED(0, 0),
/**
* RBW = 1;
*
*
* State of replica that is being written to
*
*/
RBW(1, 1),
/**
* RWR = 2;
*
*
* State of replica that is waiting to be recovered
*
*/
RWR(2, 2),
/**
* RUR = 3;
*
*
* State of replica that is under recovery
*
*/
RUR(3, 3),
/**
* TEMPORARY = 4;
*
*
* State of replica that is created for replication
*
*/
TEMPORARY(4, 4),
;
/**
* FINALIZED = 0;
*
*
* State of a replica when it is not modified
*
*/
public static final int FINALIZED_VALUE = 0;
/**
* RBW = 1;
*
*
* State of replica that is being written to
*
*/
public static final int RBW_VALUE = 1;
/**
* RWR = 2;
*
*
* State of replica that is waiting to be recovered
*
*/
public static final int RWR_VALUE = 2;
/**
* RUR = 3;
*
*
* State of replica that is under recovery
*
*/
public static final int RUR_VALUE = 3;
/**
* TEMPORARY = 4;
*
*
* State of replica that is created for replication
*
*/
public static final int TEMPORARY_VALUE = 4;
public final int getNumber() { return value; }
public static ReplicaStateProto valueOf(int value) {
switch (value) {
case 0: return FINALIZED;
case 1: return RBW;
case 2: return RWR;
case 3: return RUR;
case 4: return TEMPORARY;
default: return null;
}
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
internalGetValueMap() {
return internalValueMap;
}
private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
internalValueMap =
new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>() {
public ReplicaStateProto findValueByNumber(int number) {
return ReplicaStateProto.valueOf(number);
}
};
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.getDescriptor().getEnumTypes().get(0);
}
private static final ReplicaStateProto[] VALUES = values();
public static ReplicaStateProto valueOf(
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ReplicaStateProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ReplicaStateProto)
}
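/*
 * Usage sketch (illustrative, not part of the generated code): converting
 * between enum constants and their wire numbers, which is how a replica
 * state travels inside a serialized message.
 *
 *   ReplicaStateProto state = ReplicaStateProto.RBW;
 *   int wire = state.getNumber();                      // 1
 *   ReplicaStateProto decoded = ReplicaStateProto.valueOf(wire);
 *   // valueOf(int) returns null for numbers this schema does not know,
 *   // so callers should null-check when reading data from a newer writer.
 *   assert ReplicaStateProto.valueOf(99) == null;
 */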
public interface BlockKeyProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required uint32 keyId = 1;
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
boolean hasKeyId();
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
int getKeyId();
// required uint64 expiryDate = 2;
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
boolean hasExpiryDate();
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
long getExpiryDate();
// optional bytes keyBytes = 3;
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
boolean hasKeyBytes();
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getKeyBytes();
}
/**
 * Protobuf type {@code hadoop.hdfs.BlockKeyProto}
 *
 * <pre>
 * Block access token information
 * </pre>
 */
public static final class BlockKeyProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements BlockKeyProtoOrBuilder {
// Use BlockKeyProto.newBuilder() to construct.
private BlockKeyProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockKeyProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockKeyProto defaultInstance;
public static BlockKeyProto getDefaultInstance() {
return defaultInstance;
}
public BlockKeyProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockKeyProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
keyId_ = input.readUInt32();
break;
}
case 16: {
bitField0_ |= 0x00000002;
expiryDate_ = input.readUInt64();
break;
}
case 26: {
bitField0_ |= 0x00000004;
keyBytes_ = input.readBytes();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockKeyProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockKeyProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<BlockKeyProto>() {
public BlockKeyProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new BlockKeyProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockKeyProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 keyId = 1;
public static final int KEYID_FIELD_NUMBER = 1;
private int keyId_;
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
public boolean hasKeyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
public int getKeyId() {
return keyId_;
}
// required uint64 expiryDate = 2;
public static final int EXPIRYDATE_FIELD_NUMBER = 2;
private long expiryDate_;
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
public boolean hasExpiryDate() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
public long getExpiryDate() {
return expiryDate_;
}
// optional bytes keyBytes = 3;
public static final int KEYBYTES_FIELD_NUMBER = 3;
private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString keyBytes_;
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
public boolean hasKeyBytes() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getKeyBytes() {
return keyBytes_;
}
private void initFields() {
keyId_ = 0;
expiryDate_ = 0L;
keyBytes_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKeyId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasExpiryDate()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
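/*
 * Note on required fields (illustrative, not generated code): keyId and
 * expiryDate are required, so Builder.build() throws
 * UninitializedMessageException when either is unset, while buildPartial()
 * returns the message anyway and leaves isInitialized() == false.
 *
 *   BlockKeyProto partial = BlockKeyProto.newBuilder()
 *       .setKeyId(7)
 *       .buildPartial();   // ok, but partial.isInitialized() == false
 *   BlockKeyProto broken = BlockKeyProto.newBuilder()
 *       .setKeyId(7)
 *       .build();          // throws UninitializedMessageException
 */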
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, keyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, expiryDate_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, keyBytes_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, keyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, expiryDate_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(3, keyBytes_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto) obj;
boolean result = true;
result = result && (hasKeyId() == other.hasKeyId());
if (hasKeyId()) {
result = result && (getKeyId()
== other.getKeyId());
}
result = result && (hasExpiryDate() == other.hasExpiryDate());
if (hasExpiryDate()) {
result = result && (getExpiryDate()
== other.getExpiryDate());
}
result = result && (hasKeyBytes() == other.hasKeyBytes());
if (hasKeyBytes()) {
result = result && getKeyBytes()
.equals(other.getKeyBytes());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKeyId()) {
hash = (37 * hash) + KEYID_FIELD_NUMBER;
hash = (53 * hash) + getKeyId();
}
if (hasExpiryDate()) {
hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getExpiryDate());
}
if (hasKeyBytes()) {
hash = (37 * hash) + KEYBYTES_FIELD_NUMBER;
hash = (53 * hash) + getKeyBytes().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
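/*
 * The parseFrom overloads above consume exactly one message. The delimited
 * variants expect a varint length prefix, so several messages can share a
 * single stream; writeDelimitedTo (inherited from the shaded
 * AbstractMessageLite) produces that framing. A minimal sketch:
 *
 *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
 *   key1.writeDelimitedTo(out);
 *   key2.writeDelimitedTo(out);
 *   java.io.ByteArrayInputStream in =
 *       new java.io.ByteArrayInputStream(out.toByteArray());
 *   BlockKeyProto first  = BlockKeyProto.parseDelimitedFrom(in);
 *   BlockKeyProto second = BlockKeyProto.parseDelimitedFrom(in);
 */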
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.BlockKeyProto}
 *
 * <pre>
 * Block access token information
 * </pre>
 */
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockKeyProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
keyId_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
expiryDate_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
keyBytes_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockKeyProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.keyId_ = keyId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.expiryDate_ = expiryDate_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.keyBytes_ = keyBytes_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance()) return this;
if (other.hasKeyId()) {
setKeyId(other.getKeyId());
}
if (other.hasExpiryDate()) {
setExpiryDate(other.getExpiryDate());
}
if (other.hasKeyBytes()) {
setKeyBytes(other.getKeyBytes());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasKeyId()) {
return false;
}
if (!hasExpiryDate()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 keyId = 1;
private int keyId_ ;
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
public boolean hasKeyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
public int getKeyId() {
return keyId_;
}
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
public Builder setKeyId(int value) {
bitField0_ |= 0x00000001;
keyId_ = value;
onChanged();
return this;
}
/**
* required uint32 keyId = 1;
*
*
* Key identifier
*
*/
public Builder clearKeyId() {
bitField0_ = (bitField0_ & ~0x00000001);
keyId_ = 0;
onChanged();
return this;
}
// required uint64 expiryDate = 2;
private long expiryDate_ ;
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
public boolean hasExpiryDate() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
public long getExpiryDate() {
return expiryDate_;
}
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
public Builder setExpiryDate(long value) {
bitField0_ |= 0x00000002;
expiryDate_ = value;
onChanged();
return this;
}
/**
* required uint64 expiryDate = 2;
*
*
* Expiry time in milliseconds
*
*/
public Builder clearExpiryDate() {
bitField0_ = (bitField0_ & ~0x00000002);
expiryDate_ = 0L;
onChanged();
return this;
}
// optional bytes keyBytes = 3;
private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString keyBytes_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
public boolean hasKeyBytes() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getKeyBytes() {
return keyBytes_;
}
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
public Builder setKeyBytes(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
keyBytes_ = value;
onChanged();
return this;
}
/**
* optional bytes keyBytes = 3;
*
*
* Key secret
*
*/
public Builder clearKeyBytes() {
bitField0_ = (bitField0_ & ~0x00000004);
keyBytes_ = getDefaultInstance().getKeyBytes();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockKeyProto)
}
static {
defaultInstance = new BlockKeyProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockKeyProto)
}
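/*
 * End-to-end sketch (illustrative): building a BlockKeyProto and
 * round-tripping it through its wire form. toByteString() is inherited
 * from the shaded AbstractMessageLite; the values are placeholders.
 *
 *   BlockKeyProto key = BlockKeyProto.newBuilder()
 *       .setKeyId(42)
 *       .setExpiryDate(System.currentTimeMillis() + 600000L) // expiry in ms
 *       .setKeyBytes(io.prestosql.hadoop.$internal.com.google.protobuf
 *           .ByteString.copyFromUtf8("secret"))              // placeholder key
 *       .build();
 *   BlockKeyProto copy = BlockKeyProto.parseFrom(key.toByteString());
 *   assert copy.equals(key);
 */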
public interface ExportedBlockKeysProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required bool isBlockTokenEnabled = 1;
/**
* required bool isBlockTokenEnabled = 1;
*/
boolean hasIsBlockTokenEnabled();
/**
* required bool isBlockTokenEnabled = 1;
*/
boolean getIsBlockTokenEnabled();
// required uint64 keyUpdateInterval = 2;
/**
* required uint64 keyUpdateInterval = 2;
*/
boolean hasKeyUpdateInterval();
/**
* required uint64 keyUpdateInterval = 2;
*/
long getKeyUpdateInterval();
// required uint64 tokenLifeTime = 3;
/**
* required uint64 tokenLifeTime = 3;
*/
boolean hasTokenLifeTime();
/**
* required uint64 tokenLifeTime = 3;
*/
long getTokenLifeTime();
// required .hadoop.hdfs.BlockKeyProto currentKey = 4;
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
boolean hasCurrentKey();
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getCurrentKey();
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder();
// repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto>
getAllKeysList();
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getAllKeys(int index);
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
int getAllKeysCount();
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>
getAllKeysOrBuilderList();
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder(
int index);
}
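/*
 * Usage sketch for the message defined below (illustrative): currentKey is
 * a required nested BlockKeyProto, so the Builder exposes set/merge
 * accessors for it, while the repeated allKeys field gets add/addAll
 * accessors. currentKey here stands for any prebuilt BlockKeyProto; the
 * interval and lifetime values are hypothetical.
 *
 *   ExportedBlockKeysProto keys = ExportedBlockKeysProto.newBuilder()
 *       .setIsBlockTokenEnabled(true)
 *       .setKeyUpdateInterval(600000L)   // ms, hypothetical value
 *       .setTokenLifeTime(600000L)       // ms, hypothetical value
 *       .setCurrentKey(currentKey)
 *       .addAllKeys(currentKey)          // repeated field: appends one entry
 *       .build();
 */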
/**
 * Protobuf type {@code hadoop.hdfs.ExportedBlockKeysProto}
 *
 * <pre>
 * Current key and set of block keys at the namenode.
 * </pre>
 */
public static final class ExportedBlockKeysProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements ExportedBlockKeysProtoOrBuilder {
// Use ExportedBlockKeysProto.newBuilder() to construct.
private ExportedBlockKeysProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ExportedBlockKeysProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ExportedBlockKeysProto defaultInstance;
public static ExportedBlockKeysProto getDefaultInstance() {
return defaultInstance;
}
public ExportedBlockKeysProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ExportedBlockKeysProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
isBlockTokenEnabled_ = input.readBool();
break;
}
case 16: {
bitField0_ |= 0x00000002;
keyUpdateInterval_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
tokenLifeTime_ = input.readUInt64();
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = currentKey_.toBuilder();
}
currentKey_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(currentKey_);
currentKey_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
allKeys_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto>();
mutable_bitField0_ |= 0x00000010;
}
allKeys_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
allKeys_ = java.util.Collections.unmodifiableList(allKeys_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ExportedBlockKeysProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<ExportedBlockKeysProto>() {
public ExportedBlockKeysProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new ExportedBlockKeysProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ExportedBlockKeysProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bool isBlockTokenEnabled = 1;
public static final int ISBLOCKTOKENENABLED_FIELD_NUMBER = 1;
private boolean isBlockTokenEnabled_;
/**
* required bool isBlockTokenEnabled = 1;
*/
public boolean hasIsBlockTokenEnabled() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool isBlockTokenEnabled = 1;
*/
public boolean getIsBlockTokenEnabled() {
return isBlockTokenEnabled_;
}
// required uint64 keyUpdateInterval = 2;
public static final int KEYUPDATEINTERVAL_FIELD_NUMBER = 2;
private long keyUpdateInterval_;
/**
* required uint64 keyUpdateInterval = 2;
*/
public boolean hasKeyUpdateInterval() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 keyUpdateInterval = 2;
*/
public long getKeyUpdateInterval() {
return keyUpdateInterval_;
}
// required uint64 tokenLifeTime = 3;
public static final int TOKENLIFETIME_FIELD_NUMBER = 3;
private long tokenLifeTime_;
/**
* required uint64 tokenLifeTime = 3;
*/
public boolean hasTokenLifeTime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 tokenLifeTime = 3;
*/
public long getTokenLifeTime() {
return tokenLifeTime_;
}
// required .hadoop.hdfs.BlockKeyProto currentKey = 4;
public static final int CURRENTKEY_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto currentKey_;
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public boolean hasCurrentKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getCurrentKey() {
return currentKey_;
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() {
return currentKey_;
}
// repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
public static final int ALLKEYS_FIELD_NUMBER = 5;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto> allKeys_;
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto> getAllKeysList() {
return allKeys_;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>
getAllKeysOrBuilderList() {
return allKeys_;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public int getAllKeysCount() {
return allKeys_.size();
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getAllKeys(int index) {
return allKeys_.get(index);
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder(
int index) {
return allKeys_.get(index);
}
private void initFields() {
isBlockTokenEnabled_ = false;
keyUpdateInterval_ = 0L;
tokenLifeTime_ = 0L;
currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance();
allKeys_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasIsBlockTokenEnabled()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeyUpdateInterval()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTokenLifeTime()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCurrentKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!getCurrentKey().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getAllKeysCount(); i++) {
if (!getAllKeys(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, isBlockTokenEnabled_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, keyUpdateInterval_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, tokenLifeTime_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, currentKey_);
}
for (int i = 0; i < allKeys_.size(); i++) {
output.writeMessage(5, allKeys_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBoolSize(1, isBlockTokenEnabled_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, keyUpdateInterval_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, tokenLifeTime_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(4, currentKey_);
}
for (int i = 0; i < allKeys_.size(); i++) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(5, allKeys_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto) obj;
boolean result = true;
result = result && (hasIsBlockTokenEnabled() == other.hasIsBlockTokenEnabled());
if (hasIsBlockTokenEnabled()) {
result = result && (getIsBlockTokenEnabled()
== other.getIsBlockTokenEnabled());
}
result = result && (hasKeyUpdateInterval() == other.hasKeyUpdateInterval());
if (hasKeyUpdateInterval()) {
result = result && (getKeyUpdateInterval()
== other.getKeyUpdateInterval());
}
result = result && (hasTokenLifeTime() == other.hasTokenLifeTime());
if (hasTokenLifeTime()) {
result = result && (getTokenLifeTime()
== other.getTokenLifeTime());
}
result = result && (hasCurrentKey() == other.hasCurrentKey());
if (hasCurrentKey()) {
result = result && getCurrentKey()
.equals(other.getCurrentKey());
}
result = result && getAllKeysList()
.equals(other.getAllKeysList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasIsBlockTokenEnabled()) {
hash = (37 * hash) + ISBLOCKTOKENENABLED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIsBlockTokenEnabled());
}
if (hasKeyUpdateInterval()) {
hash = (37 * hash) + KEYUPDATEINTERVAL_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getKeyUpdateInterval());
}
if (hasTokenLifeTime()) {
hash = (37 * hash) + TOKENLIFETIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTokenLifeTime());
}
if (hasCurrentKey()) {
hash = (37 * hash) + CURRENTKEY_FIELD_NUMBER;
hash = (53 * hash) + getCurrentKey().hashCode();
}
if (getAllKeysCount() > 0) {
hash = (37 * hash) + ALLKEYS_FIELD_NUMBER;
hash = (53 * hash) + getAllKeysList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.ExportedBlockKeysProto}
 *
 * <pre>
 * Current key and set of block keys at the namenode.
 * </pre>
 */
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCurrentKeyFieldBuilder();
getAllKeysFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
isBlockTokenEnabled_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
keyUpdateInterval_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
tokenLifeTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
if (currentKeyBuilder_ == null) {
currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance();
} else {
currentKeyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
if (allKeysBuilder_ == null) {
allKeys_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
} else {
allKeysBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.isBlockTokenEnabled_ = isBlockTokenEnabled_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.keyUpdateInterval_ = keyUpdateInterval_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.tokenLifeTime_ = tokenLifeTime_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (currentKeyBuilder_ == null) {
result.currentKey_ = currentKey_;
} else {
result.currentKey_ = currentKeyBuilder_.build();
}
if (allKeysBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
allKeys_ = java.util.Collections.unmodifiableList(allKeys_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.allKeys_ = allKeys_;
} else {
result.allKeys_ = allKeysBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance()) return this;
if (other.hasIsBlockTokenEnabled()) {
setIsBlockTokenEnabled(other.getIsBlockTokenEnabled());
}
if (other.hasKeyUpdateInterval()) {
setKeyUpdateInterval(other.getKeyUpdateInterval());
}
if (other.hasTokenLifeTime()) {
setTokenLifeTime(other.getTokenLifeTime());
}
if (other.hasCurrentKey()) {
mergeCurrentKey(other.getCurrentKey());
}
if (allKeysBuilder_ == null) {
if (!other.allKeys_.isEmpty()) {
if (allKeys_.isEmpty()) {
allKeys_ = other.allKeys_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureAllKeysIsMutable();
allKeys_.addAll(other.allKeys_);
}
onChanged();
}
} else {
if (!other.allKeys_.isEmpty()) {
if (allKeysBuilder_.isEmpty()) {
allKeysBuilder_.dispose();
allKeysBuilder_ = null;
allKeys_ = other.allKeys_;
bitField0_ = (bitField0_ & ~0x00000010);
allKeysBuilder_ =
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getAllKeysFieldBuilder() : null;
} else {
allKeysBuilder_.addAllMessages(other.allKeys_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasIsBlockTokenEnabled()) {
return false;
}
if (!hasKeyUpdateInterval()) {
return false;
}
if (!hasTokenLifeTime()) {
return false;
}
if (!hasCurrentKey()) {
return false;
}
if (!getCurrentKey().isInitialized()) {
return false;
}
for (int i = 0; i < getAllKeysCount(); i++) {
if (!getAllKeys(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bool isBlockTokenEnabled = 1;
private boolean isBlockTokenEnabled_ ;
/**
* required bool isBlockTokenEnabled = 1;
*/
public boolean hasIsBlockTokenEnabled() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool isBlockTokenEnabled = 1;
*/
public boolean getIsBlockTokenEnabled() {
return isBlockTokenEnabled_;
}
/**
* required bool isBlockTokenEnabled = 1;
*/
public Builder setIsBlockTokenEnabled(boolean value) {
bitField0_ |= 0x00000001;
isBlockTokenEnabled_ = value;
onChanged();
return this;
}
/**
* required bool isBlockTokenEnabled = 1;
*/
public Builder clearIsBlockTokenEnabled() {
bitField0_ = (bitField0_ & ~0x00000001);
isBlockTokenEnabled_ = false;
onChanged();
return this;
}
// required uint64 keyUpdateInterval = 2;
private long keyUpdateInterval_ ;
/**
* required uint64 keyUpdateInterval = 2;
*/
public boolean hasKeyUpdateInterval() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 keyUpdateInterval = 2;
*/
public long getKeyUpdateInterval() {
return keyUpdateInterval_;
}
/**
* required uint64 keyUpdateInterval = 2;
*/
public Builder setKeyUpdateInterval(long value) {
bitField0_ |= 0x00000002;
keyUpdateInterval_ = value;
onChanged();
return this;
}
/**
* required uint64 keyUpdateInterval = 2;
*/
public Builder clearKeyUpdateInterval() {
bitField0_ = (bitField0_ & ~0x00000002);
keyUpdateInterval_ = 0L;
onChanged();
return this;
}
// required uint64 tokenLifeTime = 3;
private long tokenLifeTime_ ;
/**
* required uint64 tokenLifeTime = 3;
*/
public boolean hasTokenLifeTime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 tokenLifeTime = 3;
*/
public long getTokenLifeTime() {
return tokenLifeTime_;
}
/**
* required uint64 tokenLifeTime = 3;
*/
public Builder setTokenLifeTime(long value) {
bitField0_ |= 0x00000004;
tokenLifeTime_ = value;
onChanged();
return this;
}
/**
* required uint64 tokenLifeTime = 3;
*/
public Builder clearTokenLifeTime() {
bitField0_ = (bitField0_ & ~0x00000004);
tokenLifeTime_ = 0L;
onChanged();
return this;
}
// required .hadoop.hdfs.BlockKeyProto currentKey = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder> currentKeyBuilder_;
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public boolean hasCurrentKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getCurrentKey() {
if (currentKeyBuilder_ == null) {
return currentKey_;
} else {
return currentKeyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public Builder setCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto value) {
if (currentKeyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
currentKey_ = value;
onChanged();
} else {
currentKeyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public Builder setCurrentKey(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder builderForValue) {
if (currentKeyBuilder_ == null) {
currentKey_ = builderForValue.build();
onChanged();
} else {
currentKeyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public Builder mergeCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto value) {
if (currentKeyBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
currentKey_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance()) {
currentKey_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.newBuilder(currentKey_).mergeFrom(value).buildPartial();
} else {
currentKey_ = value;
}
onChanged();
} else {
currentKeyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
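/*
 * Merge semantics sketch (illustrative): for a singular message field,
 * mergeCurrentKey() combines old and new keys field by field instead of
 * replacing the old value wholesale, as the newBuilder(...).mergeFrom(...)
 * path above shows.
 *
 *   ExportedBlockKeysProto a = ...;  // currentKey has only keyId set
 *   ExportedBlockKeysProto b = ...;  // currentKey has only expiryDate set
 *   ExportedBlockKeysProto merged = a.toBuilder().mergeFrom(b).buildPartial();
 *   // merged.getCurrentKey() now carries both keyId and expiryDate
 */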
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public Builder clearCurrentKey() {
if (currentKeyBuilder_ == null) {
currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance();
onChanged();
} else {
currentKeyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder getCurrentKeyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getCurrentKeyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() {
if (currentKeyBuilder_ != null) {
return currentKeyBuilder_.getMessageOrBuilder();
} else {
return currentKey_;
}
}
/**
* required .hadoop.hdfs.BlockKeyProto currentKey = 4;
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>
getCurrentKeyFieldBuilder() {
if (currentKeyBuilder_ == null) {
currentKeyBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>(
currentKey_,
getParentForChildren(),
isClean());
currentKey_ = null;
}
return currentKeyBuilder_;
}
// repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto> allKeys_ =
java.util.Collections.emptyList();
private void ensureAllKeysIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
allKeys_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto>(allKeys_);
bitField0_ |= 0x00000010;
}
}
private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder> allKeysBuilder_;
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto> getAllKeysList() {
if (allKeysBuilder_ == null) {
return java.util.Collections.unmodifiableList(allKeys_);
} else {
return allKeysBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public int getAllKeysCount() {
if (allKeysBuilder_ == null) {
return allKeys_.size();
} else {
return allKeysBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto getAllKeys(int index) {
if (allKeysBuilder_ == null) {
return allKeys_.get(index);
} else {
return allKeysBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder setAllKeys(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto value) {
if (allKeysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureAllKeysIsMutable();
allKeys_.set(index, value);
onChanged();
} else {
allKeysBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder setAllKeys(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder builderForValue) {
if (allKeysBuilder_ == null) {
ensureAllKeysIsMutable();
allKeys_.set(index, builderForValue.build());
onChanged();
} else {
allKeysBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder addAllKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto value) {
if (allKeysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureAllKeysIsMutable();
allKeys_.add(value);
onChanged();
} else {
allKeysBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder addAllKeys(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto value) {
if (allKeysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureAllKeysIsMutable();
allKeys_.add(index, value);
onChanged();
} else {
allKeysBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder addAllKeys(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder builderForValue) {
if (allKeysBuilder_ == null) {
ensureAllKeysIsMutable();
allKeys_.add(builderForValue.build());
onChanged();
} else {
allKeysBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder addAllKeys(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder builderForValue) {
if (allKeysBuilder_ == null) {
ensureAllKeysIsMutable();
allKeys_.add(index, builderForValue.build());
onChanged();
} else {
allKeysBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder addAllAllKeys(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto> values) {
if (allKeysBuilder_ == null) {
ensureAllKeysIsMutable();
super.addAll(values, allKeys_);
onChanged();
} else {
allKeysBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder clearAllKeys() {
if (allKeysBuilder_ == null) {
allKeys_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
allKeysBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public Builder removeAllKeys(int index) {
if (allKeysBuilder_ == null) {
ensureAllKeysIsMutable();
allKeys_.remove(index);
onChanged();
} else {
allKeysBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder getAllKeysBuilder(
int index) {
return getAllKeysFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder(
int index) {
if (allKeysBuilder_ == null) {
return allKeys_.get(index); } else {
return allKeysBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>
getAllKeysOrBuilderList() {
if (allKeysBuilder_ != null) {
return allKeysBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(allKeys_);
}
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder addAllKeysBuilder() {
return getAllKeysFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder addAllKeysBuilder(
int index) {
return getAllKeysFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.BlockKeyProto allKeys = 5;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder>
getAllKeysBuilderList() {
return getAllKeysFieldBuilder().getBuilderList();
}
private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>
getAllKeysFieldBuilder() {
if (allKeysBuilder_ == null) {
allKeysBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProtoOrBuilder>(
allKeys_,
((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
allKeys_ = null;
}
return allKeysBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExportedBlockKeysProto)
}
static {
defaultInstance = new ExportedBlockKeysProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ExportedBlockKeysProto)
}
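// Illustrative usage sketch (not part of the generated file): combining the
// builder methods above. `key` is a hypothetical, fully initialized
// BlockKeyProto, and the message's other required fields are assumed to be
// set through the setters defined earlier in this builder.
//
//   ExportedBlockKeysProto.Builder b = ExportedBlockKeysProto.newBuilder();
//   b.getCurrentKeyBuilder().mergeFrom(key);  // edit required field 4 in place
//   b.addAllKeys(key);                        // append one element to field 5
//   b.addAllAllKeys(java.util.Arrays.asList(key, key));  // bulk append
//   ExportedBlockKeysProto keys = b.build();  // throws if a required field is unset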
public interface BlockWithLocationsProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BlockProto block = 1;
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
// repeated string datanodeUuids = 2;
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
java.util.List<java.lang.String>
getDatanodeUuidsList();
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
int getDatanodeUuidsCount();
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
java.lang.String getDatanodeUuids(int index);
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getDatanodeUuidsBytes(int index);
// repeated string storageUuids = 3;
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
java.util.List<java.lang.String>
getStorageUuidsList();
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
int getStorageUuidsCount();
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
java.lang.String getStorageUuids(int index);
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getStorageUuidsBytes(int index);
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
int getStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
// optional bytes indices = 5;
/**
* optional bytes indices = 5;
*/
boolean hasIndices();
/**
* optional bytes indices = 5;
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getIndices();
// optional uint32 dataBlockNum = 6;
/**
* optional uint32 dataBlockNum = 6;
*/
boolean hasDataBlockNum();
/**
* optional uint32 dataBlockNum = 6;
*/
int getDataBlockNum();
// optional uint32 cellSize = 7;
/**
* optional uint32 cellSize = 7;
*/
boolean hasCellSize();
/**
* optional uint32 cellSize = 7;
*/
int getCellSize();
}
/**
* Protobuf type {@code hadoop.hdfs.BlockWithLocationsProto}
*
*
**
* Block and the datanodes where it is located
*
*/
public static final class BlockWithLocationsProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements BlockWithLocationsProtoOrBuilder {
// Use BlockWithLocationsProto.newBuilder() to construct.
private BlockWithLocationsProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockWithLocationsProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockWithLocationsProto defaultInstance;
public static BlockWithLocationsProto getDefaultInstance() {
return defaultInstance;
}
public BlockWithLocationsProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockWithLocationsProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
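// Each tag is (fieldNumber << 3) | wireType: 10 = field 1 length-delimited,
// 18/26 = fields 2/3, 32 and 34 = field 4 (varint vs. packed), 42 = field 5,
// and 48/56 = fields 6/7 as varints.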
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
datanodeUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000002;
}
datanodeUuids_.add(input.readBytes());
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
storageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000004;
}
storageUuids_.add(input.readBytes());
break;
}
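// Field 4 (storageTypes) may arrive unpacked (case 32: one varint per
// element) or packed (case 34: a single length-delimited run of varints);
// both paths accumulate into the same list, and unrecognized enum numbers
// are preserved in unknownFields rather than dropped.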
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000008;
}
storageTypes_.add(value);
}
break;
}
case 34: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000008;
}
storageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
case 42: {
bitField0_ |= 0x00000002;
indices_ = input.readBytes();
break;
}
case 48: {
bitField0_ |= 0x00000004;
dataBlockNum_ = input.readUInt32();
break;
}
case 56: {
bitField0_ |= 0x00000008;
cellSize_ = input.readUInt32();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
datanodeUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(datanodeUuids_);
}
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
storageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(storageUuids_);
}
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockWithLocationsProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<BlockWithLocationsProto>() {
public BlockWithLocationsProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new BlockWithLocationsProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlockWithLocationsProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// repeated string datanodeUuids = 2;
public static final int DATANODEUUIDS_FIELD_NUMBER = 2;
private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList datanodeUuids_;
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public java.util.List<java.lang.String>
getDatanodeUuidsList() {
return datanodeUuids_;
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public int getDatanodeUuidsCount() {
return datanodeUuids_.size();
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public java.lang.String getDatanodeUuids(int index) {
return datanodeUuids_.get(index);
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getDatanodeUuidsBytes(int index) {
return datanodeUuids_.getByteString(index);
}
// repeated string storageUuids = 3;
public static final int STORAGEUUIDS_FIELD_NUMBER = 3;
private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList storageUuids_;
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public java.util.List<java.lang.String>
getStorageUuidsList() {
return storageUuids_;
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public int getStorageUuidsCount() {
return storageUuids_.size();
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public java.lang.String getStorageUuids(int index) {
return storageUuids_.get(index);
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getStorageUuidsBytes(int index) {
return storageUuids_.getByteString(index);
}
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
public static final int STORAGETYPES_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
return storageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
// optional bytes indices = 5;
public static final int INDICES_FIELD_NUMBER = 5;
private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString indices_;
/**
* optional bytes indices = 5;
*/
public boolean hasIndices() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes indices = 5;
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getIndices() {
return indices_;
}
// optional uint32 dataBlockNum = 6;
public static final int DATABLOCKNUM_FIELD_NUMBER = 6;
private int dataBlockNum_;
/**
* optional uint32 dataBlockNum = 6;
*/
public boolean hasDataBlockNum() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint32 dataBlockNum = 6;
*/
public int getDataBlockNum() {
return dataBlockNum_;
}
// optional uint32 cellSize = 7;
public static final int CELLSIZE_FIELD_NUMBER = 7;
private int cellSize_;
/**
* optional uint32 cellSize = 7;
*/
public boolean hasCellSize() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint32 cellSize = 7;
*/
public int getCellSize() {
return cellSize_;
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
datanodeUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
storageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
storageTypes_ = java.util.Collections.emptyList();
indices_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
dataBlockNum_ = 0;
cellSize_ = 0;
}
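// Memoized required-field check below: -1 means not yet computed, 0 means a
// required field (block, or a required subfield of it) is missing, and 1
// means the message is fully initialized.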
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
for (int i = 0; i < datanodeUuids_.size(); i++) {
output.writeBytes(2, datanodeUuids_.getByteString(i));
}
for (int i = 0; i < storageUuids_.size(); i++) {
output.writeBytes(3, storageUuids_.getByteString(i));
}
for (int i = 0; i < storageTypes_.size(); i++) {
output.writeEnum(4, storageTypes_.get(i).getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(5, indices_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(6, dataBlockNum_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(7, cellSize_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
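// For each repeated field below, the size is the payload bytes (dataSize)
// plus one tag byte per element; fields 2 through 4 all have single-byte tags.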
{
int dataSize = 0;
for (int i = 0; i < datanodeUuids_.size(); i++) {
dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(datanodeUuids_.getByteString(i));
}
size += dataSize;
size += 1 * getDatanodeUuidsList().size();
}
{
int dataSize = 0;
for (int i = 0; i < storageUuids_.size(); i++) {
dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(storageUuids_.getByteString(i));
}
size += dataSize;
size += 1 * getStorageUuidsList().size();
}
{
int dataSize = 0;
for (int i = 0; i < storageTypes_.size(); i++) {
dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(storageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * storageTypes_.size();
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(5, indices_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(6, dataBlockNum_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, cellSize_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && getDatanodeUuidsList()
.equals(other.getDatanodeUuidsList());
result = result && getStorageUuidsList()
.equals(other.getStorageUuidsList());
result = result && getStorageTypesList()
.equals(other.getStorageTypesList());
result = result && (hasIndices() == other.hasIndices());
if (hasIndices()) {
result = result && getIndices()
.equals(other.getIndices());
}
result = result && (hasDataBlockNum() == other.hasDataBlockNum());
if (hasDataBlockNum()) {
result = result && (getDataBlockNum()
== other.getDataBlockNum());
}
result = result && (hasCellSize() == other.hasCellSize());
if (hasCellSize()) {
result = result && (getCellSize()
== other.getCellSize());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (getDatanodeUuidsCount() > 0) {
hash = (37 * hash) + DATANODEUUIDS_FIELD_NUMBER;
hash = (53 * hash) + getDatanodeUuidsList().hashCode();
}
if (getStorageUuidsCount() > 0) {
hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuidsList().hashCode();
}
if (getStorageTypesCount() > 0) {
hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getStorageTypesList());
}
if (hasIndices()) {
hash = (37 * hash) + INDICES_FIELD_NUMBER;
hash = (53 * hash) + getIndices().hashCode();
}
if (hasDataBlockNum()) {
hash = (37 * hash) + DATABLOCKNUM_FIELD_NUMBER;
hash = (53 * hash) + getDataBlockNum();
}
if (hasCellSize()) {
hash = (37 * hash) + CELLSIZE_FIELD_NUMBER;
hash = (53 * hash) + getCellSize();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
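// parseDelimitedFrom pairs with MessageLite#writeDelimitedTo: each message is
// prefixed with a varint length so several can share one stream; the plain
// parseFrom overloads consume the whole input as a single message.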
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockWithLocationsProto}
*
*
**
* Block and the datanodes where it is located
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
datanodeUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
storageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
indices_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000010);
dataBlockNum_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
cellSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
datanodeUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(
datanodeUuids_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.datanodeUuids_ = datanodeUuids_;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
storageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(
storageUuids_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.storageUuids_ = storageUuids_;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.storageTypes_ = storageTypes_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000002;
}
result.indices_ = indices_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000004;
}
result.dataBlockNum_ = dataBlockNum_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000008;
}
result.cellSize_ = cellSize_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
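// buildPartial remaps the builder's has-bits (0x10/0x20/0x40 for fields 5-7)
// onto the message's compact layout (0x02/0x04/0x08); repeated fields carry
// no has-bit and are frozen into unmodifiable views instead.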
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (!other.datanodeUuids_.isEmpty()) {
if (datanodeUuids_.isEmpty()) {
datanodeUuids_ = other.datanodeUuids_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureDatanodeUuidsIsMutable();
datanodeUuids_.addAll(other.datanodeUuids_);
}
onChanged();
}
if (!other.storageUuids_.isEmpty()) {
if (storageUuids_.isEmpty()) {
storageUuids_ = other.storageUuids_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureStorageUuidsIsMutable();
storageUuids_.addAll(other.storageUuids_);
}
onChanged();
}
if (!other.storageTypes_.isEmpty()) {
if (storageTypes_.isEmpty()) {
storageTypes_ = other.storageTypes_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureStorageTypesIsMutable();
storageTypes_.addAll(other.storageTypes_);
}
onChanged();
}
if (other.hasIndices()) {
setIndices(other.getIndices());
}
if (other.hasDataBlockNum()) {
setDataBlockNum(other.getDataBlockNum());
}
if (other.hasCellSize()) {
setCellSize(other.getCellSize());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
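// Merge semantics above: the block message merges field by field, repeated
// fields concatenate (adopting the other list wholesale when ours is still
// empty), and optional scalars are simply overwritten.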
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.BlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*
*
* Block
*
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// repeated string datanodeUuids = 2;
private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList datanodeUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureDatanodeUuidsIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
datanodeUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(datanodeUuids_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public java.util.List<java.lang.String>
getDatanodeUuidsList() {
return java.util.Collections.unmodifiableList(datanodeUuids_);
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public int getDatanodeUuidsCount() {
return datanodeUuids_.size();
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public java.lang.String getDatanodeUuids(int index) {
return datanodeUuids_.get(index);
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getDatanodeUuidsBytes(int index) {
return datanodeUuids_.getByteString(index);
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public Builder setDatanodeUuids(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureDatanodeUuidsIsMutable();
datanodeUuids_.set(index, value);
onChanged();
return this;
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public Builder addDatanodeUuids(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureDatanodeUuidsIsMutable();
datanodeUuids_.add(value);
onChanged();
return this;
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public Builder addAllDatanodeUuids(
java.lang.Iterable<java.lang.String> values) {
ensureDatanodeUuidsIsMutable();
super.addAll(values, datanodeUuids_);
onChanged();
return this;
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public Builder clearDatanodeUuids() {
datanodeUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* repeated string datanodeUuids = 2;
*
*
* Datanodes with replicas of the block
*
*/
public Builder addDatanodeUuidsBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureDatanodeUuidsIsMutable();
datanodeUuids_.add(value);
onChanged();
return this;
}
// repeated string storageUuids = 3;
private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList storageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureStorageUuidsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
storageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(storageUuids_);
bitField0_ |= 0x00000004;
}
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public java.util.List<java.lang.String>
getStorageUuidsList() {
return java.util.Collections.unmodifiableList(storageUuids_);
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public int getStorageUuidsCount() {
return storageUuids_.size();
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public java.lang.String getStorageUuids(int index) {
return storageUuids_.get(index);
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getStorageUuidsBytes(int index) {
return storageUuids_.getByteString(index);
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public Builder setStorageUuids(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageUuidsIsMutable();
storageUuids_.set(index, value);
onChanged();
return this;
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public Builder addStorageUuids(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageUuidsIsMutable();
storageUuids_.add(value);
onChanged();
return this;
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public Builder addAllStorageUuids(
java.lang.Iterable<java.lang.String> values) {
ensureStorageUuidsIsMutable();
super.addAll(values, storageUuids_);
onChanged();
return this;
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public Builder clearStorageUuids() {
storageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
/**
* repeated string storageUuids = 3;
*
*
* Storages with replicas of the block
*
*/
public Builder addStorageUuidsBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageUuidsIsMutable();
storageUuids_.add(value);
onChanged();
return this;
}
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_ =
java.util.Collections.emptyList();
private void ensureStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
return java.util.Collections.unmodifiableList(storageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public Builder setStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public Builder addAllStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureStorageTypesIsMutable();
super.addAll(values, storageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4;
*/
public Builder clearStorageTypes() {
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
// optional bytes indices = 5;
private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString indices_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes indices = 5;
*/
public boolean hasIndices() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes indices = 5;
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getIndices() {
return indices_;
}
/**
* optional bytes indices = 5;
*/
public Builder setIndices(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
indices_ = value;
onChanged();
return this;
}
/**
* optional bytes indices = 5;
*/
public Builder clearIndices() {
bitField0_ = (bitField0_ & ~0x00000010);
indices_ = getDefaultInstance().getIndices();
onChanged();
return this;
}
// optional uint32 dataBlockNum = 6;
private int dataBlockNum_ ;
/**
* optional uint32 dataBlockNum = 6;
*/
public boolean hasDataBlockNum() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint32 dataBlockNum = 6;
*/
public int getDataBlockNum() {
return dataBlockNum_;
}
/**
* optional uint32 dataBlockNum = 6;
*/
public Builder setDataBlockNum(int value) {
bitField0_ |= 0x00000020;
dataBlockNum_ = value;
onChanged();
return this;
}
/**
* optional uint32 dataBlockNum = 6;
*/
public Builder clearDataBlockNum() {
bitField0_ = (bitField0_ & ~0x00000020);
dataBlockNum_ = 0;
onChanged();
return this;
}
// optional uint32 cellSize = 7;
private int cellSize_ ;
/**
* optional uint32 cellSize = 7;
*/
public boolean hasCellSize() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 cellSize = 7;
*/
public int getCellSize() {
return cellSize_;
}
/**
* optional uint32 cellSize = 7;
*/
public Builder setCellSize(int value) {
bitField0_ |= 0x00000040;
cellSize_ = value;
onChanged();
return this;
}
/**
* optional uint32 cellSize = 7;
*/
public Builder clearCellSize() {
bitField0_ = (bitField0_ & ~0x00000040);
cellSize_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockWithLocationsProto)
}
static {
defaultInstance = new BlockWithLocationsProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockWithLocationsProto)
}
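// Illustrative usage sketch (not part of the generated file): building and
// round-tripping a BlockWithLocationsProto, assuming `block` is a fully
// initialized HdfsProtos.BlockProto and the UUID strings are hypothetical
// placeholders.
//
//   BlockWithLocationsProto proto = BlockWithLocationsProto.newBuilder()
//       .setBlock(block)                  // required field 1
//       .addDatanodeUuids("dn-uuid-0")
//       .addStorageUuids("storage-uuid-0")
//       .addStorageTypes(HdfsProtos.StorageTypeProto.DISK)
//       .build();
//   BlockWithLocationsProto parsed =
//       BlockWithLocationsProto.parseFrom(proto.toByteArray());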
public interface BlocksWithLocationsProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto>
getBlocksList();
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.BlocksWithLocationsProto}
*
*
**
* List of blocks with locations
*
*/
public static final class BlocksWithLocationsProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements BlocksWithLocationsProtoOrBuilder {
// Use BlocksWithLocationsProto.newBuilder() to construct.
private BlocksWithLocationsProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlocksWithLocationsProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlocksWithLocationsProto defaultInstance;
public static BlocksWithLocationsProto getDefaultInstance() {
return defaultInstance;
}
public BlocksWithLocationsProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlocksWithLocationsProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto>();
mutable_bitField0_ |= 0x00000001;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlocksWithLocationsProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<BlocksWithLocationsProto>() {
public BlocksWithLocationsProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new BlocksWithLocationsProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<BlocksWithLocationsProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
public static final int BLOCKS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto> blocks_;
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto> getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(1, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < blocks_.size(); i++) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto) obj;
boolean result = true;
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
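// Usage sketch (illustrative, assuming `data` holds bytes previously produced
// by writeTo()/toByteArray() on a BlocksWithLocationsProto):
//
//   BlocksWithLocationsProto msg = BlocksWithLocationsProto.parseFrom(data);
//   int count = msg.getBlocksCount();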
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
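// Usage sketch (illustrative): instances are immutable and are assembled via
// the Builder below, e.g. with `block` being a BlockWithLocationsProto built
// elsewhere:
//
//   BlocksWithLocationsProto msg = BlocksWithLocationsProto.newBuilder()
//       .addBlocks(block)
//       .build();  // throws if any nested block is uninitialized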
/**
* Protobuf type {@code hadoop.hdfs.BlocksWithLocationsProto}
*
* List of blocks with locations
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto(this);
int from_bitField0_ = bitField0_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto.getDefaultInstance()) return this;
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
blocksBuilder_ =
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
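// Note: merging another BlocksWithLocationsProto appends its blocks to this
// builder's list (directly, or through the nested field builder); existing
// entries are never replaced.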
public final boolean isInitialized() {
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto>(blocks_);
bitField0_ |= 0x00000001;
}
}
private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlocksWithLocationsProto)
}
static {
defaultInstance = new BlocksWithLocationsProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlocksWithLocationsProto)
}
public interface RemoteEditLogProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required uint64 startTxId = 1;
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
boolean hasStartTxId();
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
long getStartTxId();
// required uint64 endTxId = 2;
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
boolean hasEndTxId();
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
long getEndTxId();
// optional bool isInProgress = 3 [default = false];
/**
* optional bool isInProgress = 3 [default = false];
*/
boolean hasIsInProgress();
/**
* optional bool isInProgress = 3 [default = false];
*/
boolean getIsInProgress();
}
/**
* Protobuf type {@code hadoop.hdfs.RemoteEditLogProto}
*
* Editlog information with available transactions
*/
public static final class RemoteEditLogProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements RemoteEditLogProtoOrBuilder {
// Use RemoteEditLogProto.newBuilder() to construct.
private RemoteEditLogProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RemoteEditLogProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RemoteEditLogProto defaultInstance;
public static RemoteEditLogProto getDefaultInstance() {
return defaultInstance;
}
public RemoteEditLogProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RemoteEditLogProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
startTxId_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
endTxId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
isInProgress_ = input.readBool();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RemoteEditLogProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<RemoteEditLogProto>() {
public RemoteEditLogProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new RemoteEditLogProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RemoteEditLogProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 startTxId = 1;
public static final int STARTTXID_FIELD_NUMBER = 1;
private long startTxId_;
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
public boolean hasStartTxId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
public long getStartTxId() {
return startTxId_;
}
// required uint64 endTxId = 2;
public static final int ENDTXID_FIELD_NUMBER = 2;
private long endTxId_;
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
public boolean hasEndTxId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
public long getEndTxId() {
return endTxId_;
}
// optional bool isInProgress = 3 [default = false];
public static final int ISINPROGRESS_FIELD_NUMBER = 3;
private boolean isInProgress_;
/**
* optional bool isInProgress = 3 [default = false];
*/
public boolean hasIsInProgress() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bool isInProgress = 3 [default = false];
*/
public boolean getIsInProgress() {
return isInProgress_;
}
private void initFields() {
startTxId_ = 0L;
endTxId_ = 0L;
isInProgress_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStartTxId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEndTxId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
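// Note: startTxId and endTxId are `required` fields, so isInitialized() (and
// hence Builder.build()) fails while either is unset; isInProgress is
// optional and defaults to false.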
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, startTxId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, endTxId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(3, isInProgress_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, startTxId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, endTxId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBoolSize(3, isInProgress_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto) obj;
boolean result = true;
result = result && (hasStartTxId() == other.hasStartTxId());
if (hasStartTxId()) {
result = result && (getStartTxId()
== other.getStartTxId());
}
result = result && (hasEndTxId() == other.hasEndTxId());
if (hasEndTxId()) {
result = result && (getEndTxId()
== other.getEndTxId());
}
result = result && (hasIsInProgress() == other.hasIsInProgress());
if (hasIsInProgress()) {
result = result && (getIsInProgress()
== other.getIsInProgress());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStartTxId()) {
hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getStartTxId());
}
if (hasEndTxId()) {
hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getEndTxId());
}
if (hasIsInProgress()) {
hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIsInProgress());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
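// Usage sketch (illustrative): building a descriptor for an edit log segment
// covering a transaction range:
//
//   RemoteEditLogProto log = RemoteEditLogProto.newBuilder()
//       .setStartTxId(1L)
//       .setEndTxId(100L)        // both txid fields are required
//       .setIsInProgress(false)  // optional; defaults to false
//       .build();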
/**
* Protobuf type {@code hadoop.hdfs.RemoteEditLogProto}
*
* Editlog information with available transactions
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
startTxId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
endTxId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
isInProgress_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.startTxId_ = startTxId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.endTxId_ = endTxId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.isInProgress_ = isInProgress_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.getDefaultInstance()) return this;
if (other.hasStartTxId()) {
setStartTxId(other.getStartTxId());
}
if (other.hasEndTxId()) {
setEndTxId(other.getEndTxId());
}
if (other.hasIsInProgress()) {
setIsInProgress(other.getIsInProgress());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStartTxId()) {
return false;
}
if (!hasEndTxId()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 startTxId = 1;
private long startTxId_ ;
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
public boolean hasStartTxId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
public long getStartTxId() {
return startTxId_;
}
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
public Builder setStartTxId(long value) {
bitField0_ |= 0x00000001;
startTxId_ = value;
onChanged();
return this;
}
/**
* required uint64 startTxId = 1;
*
*
* Starting available edit log transaction
*
*/
public Builder clearStartTxId() {
bitField0_ = (bitField0_ & ~0x00000001);
startTxId_ = 0L;
onChanged();
return this;
}
// required uint64 endTxId = 2;
private long endTxId_ ;
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
public boolean hasEndTxId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
public long getEndTxId() {
return endTxId_;
}
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
public Builder setEndTxId(long value) {
bitField0_ |= 0x00000002;
endTxId_ = value;
onChanged();
return this;
}
/**
* required uint64 endTxId = 2;
*
*
* Ending available edit log transaction
*
*/
public Builder clearEndTxId() {
bitField0_ = (bitField0_ & ~0x00000002);
endTxId_ = 0L;
onChanged();
return this;
}
// optional bool isInProgress = 3 [default = false];
private boolean isInProgress_ ;
/**
* optional bool isInProgress = 3 [default = false];
*/
public boolean hasIsInProgress() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bool isInProgress = 3 [default = false];
*/
public boolean getIsInProgress() {
return isInProgress_;
}
/**
* optional bool isInProgress = 3 [default = false];
*/
public Builder setIsInProgress(boolean value) {
bitField0_ |= 0x00000004;
isInProgress_ = value;
onChanged();
return this;
}
/**
* optional bool isInProgress = 3 [default = false];
*/
public Builder clearIsInProgress() {
bitField0_ = (bitField0_ & ~0x00000004);
isInProgress_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteEditLogProto)
}
static {
defaultInstance = new RemoteEditLogProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteEditLogProto)
}
public interface RemoteEditLogManifestProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto>
getLogsList();
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto getLogs(int index);
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
int getLogsCount();
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder>
getLogsOrBuilderList();
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder(
int index);
// optional uint64 committedTxnId = 2;
/**
* optional uint64 committedTxnId = 2;
*/
boolean hasCommittedTxnId();
/**
* optional uint64 committedTxnId = 2;
*/
long getCommittedTxnId();
}
/**
* Protobuf type {@code hadoop.hdfs.RemoteEditLogManifestProto}
*
* Enumeration of editlogs available on a remote namenode
*/
public static final class RemoteEditLogManifestProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements RemoteEditLogManifestProtoOrBuilder {
// Use RemoteEditLogManifestProto.newBuilder() to construct.
private RemoteEditLogManifestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RemoteEditLogManifestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RemoteEditLogManifestProto defaultInstance;
public static RemoteEditLogManifestProto getDefaultInstance() {
return defaultInstance;
}
public RemoteEditLogManifestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RemoteEditLogManifestProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
logs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto>();
mutable_bitField0_ |= 0x00000001;
}
logs_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.PARSER, extensionRegistry));
break;
}
case 16: {
bitField0_ |= 0x00000001;
committedTxnId_ = input.readUInt64();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
logs_ = java.util.Collections.unmodifiableList(logs_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RemoteEditLogManifestProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<RemoteEditLogManifestProto>() {
public RemoteEditLogManifestProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new RemoteEditLogManifestProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RemoteEditLogManifestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
public static final int LOGS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto> logs_;
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto> getLogsList() {
return logs_;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder>
getLogsOrBuilderList() {
return logs_;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public int getLogsCount() {
return logs_.size();
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto getLogs(int index) {
return logs_.get(index);
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder(
int index) {
return logs_.get(index);
}
// optional uint64 committedTxnId = 2;
public static final int COMMITTEDTXNID_FIELD_NUMBER = 2;
private long committedTxnId_;
/**
* optional uint64 committedTxnId = 2;
*/
public boolean hasCommittedTxnId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional uint64 committedTxnId = 2;
*/
public long getCommittedTxnId() {
return committedTxnId_;
}
private void initFields() {
logs_ = java.util.Collections.emptyList();
committedTxnId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getLogsCount(); i++) {
if (!getLogs(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < logs_.size(); i++) {
output.writeMessage(1, logs_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(2, committedTxnId_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < logs_.size(); i++) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, logs_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, committedTxnId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto) obj;
boolean result = true;
result = result && getLogsList()
.equals(other.getLogsList());
result = result && (hasCommittedTxnId() == other.hasCommittedTxnId());
if (hasCommittedTxnId()) {
result = result && (getCommittedTxnId()
== other.getCommittedTxnId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getLogsCount() > 0) {
hash = (37 * hash) + LOGS_FIELD_NUMBER;
hash = (53 * hash) + getLogsList().hashCode();
}
if (hasCommittedTxnId()) {
hash = (37 * hash) + COMMITTEDTXNID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCommittedTxnId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
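// Usage sketch (illustrative): composing a manifest from previously built
// RemoteEditLogProto entries:
//
//   RemoteEditLogManifestProto manifest = RemoteEditLogManifestProto.newBuilder()
//       .addLogs(log)             // repeated RemoteEditLogProto entries
//       .setCommittedTxnId(100L)  // optional
//       .build();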
/**
* Protobuf type {@code hadoop.hdfs.RemoteEditLogManifestProto}
*
* Enumeration of editlogs available on a remote namenode
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getLogsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (logsBuilder_ == null) {
logs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
logsBuilder_.clear();
}
committedTxnId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (logsBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
logs_ = java.util.Collections.unmodifiableList(logs_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.logs_ = logs_;
} else {
result.logs_ = logsBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.committedTxnId_ = committedTxnId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto.getDefaultInstance()) return this;
if (logsBuilder_ == null) {
if (!other.logs_.isEmpty()) {
if (logs_.isEmpty()) {
logs_ = other.logs_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureLogsIsMutable();
logs_.addAll(other.logs_);
}
onChanged();
}
} else {
if (!other.logs_.isEmpty()) {
if (logsBuilder_.isEmpty()) {
logsBuilder_.dispose();
logsBuilder_ = null;
logs_ = other.logs_;
bitField0_ = (bitField0_ & ~0x00000001);
logsBuilder_ =
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getLogsFieldBuilder() : null;
} else {
logsBuilder_.addAllMessages(other.logs_);
}
}
}
if (other.hasCommittedTxnId()) {
setCommittedTxnId(other.getCommittedTxnId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getLogsCount(); i++) {
if (!getLogs(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto> logs_ =
java.util.Collections.emptyList();
private void ensureLogsIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
logs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto>(logs_);
bitField0_ |= 0x00000001;
}
}
private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder> logsBuilder_;
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto> getLogsList() {
if (logsBuilder_ == null) {
return java.util.Collections.unmodifiableList(logs_);
} else {
return logsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public int getLogsCount() {
if (logsBuilder_ == null) {
return logs_.size();
} else {
return logsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto getLogs(int index) {
if (logsBuilder_ == null) {
return logs_.get(index);
} else {
return logsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder setLogs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto value) {
if (logsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLogsIsMutable();
logs_.set(index, value);
onChanged();
} else {
logsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder setLogs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder builderForValue) {
if (logsBuilder_ == null) {
ensureLogsIsMutable();
logs_.set(index, builderForValue.build());
onChanged();
} else {
logsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder addLogs(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto value) {
if (logsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLogsIsMutable();
logs_.add(value);
onChanged();
} else {
logsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder addLogs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto value) {
if (logsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLogsIsMutable();
logs_.add(index, value);
onChanged();
} else {
logsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder addLogs(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder builderForValue) {
if (logsBuilder_ == null) {
ensureLogsIsMutable();
logs_.add(builderForValue.build());
onChanged();
} else {
logsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder addLogs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder builderForValue) {
if (logsBuilder_ == null) {
ensureLogsIsMutable();
logs_.add(index, builderForValue.build());
onChanged();
} else {
logsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder addAllLogs(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto> values) {
if (logsBuilder_ == null) {
ensureLogsIsMutable();
super.addAll(values, logs_);
onChanged();
} else {
logsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder clearLogs() {
if (logsBuilder_ == null) {
logs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
logsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public Builder removeLogs(int index) {
if (logsBuilder_ == null) {
ensureLogsIsMutable();
logs_.remove(index);
onChanged();
} else {
logsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder getLogsBuilder(
int index) {
return getLogsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder(
int index) {
if (logsBuilder_ == null) {
return logs_.get(index); } else {
return logsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder>
getLogsOrBuilderList() {
if (logsBuilder_ != null) {
return logsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(logs_);
}
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder addLogsBuilder() {
return getLogsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder addLogsBuilder(
int index) {
return getLogsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.RemoteEditLogProto logs = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder>
getLogsBuilderList() {
return getLogsFieldBuilder().getBuilderList();
}
private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder>
getLogsFieldBuilder() {
if (logsBuilder_ == null) {
logsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProtoOrBuilder>(
logs_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
logs_ = null;
}
return logsBuilder_;
}
// optional uint64 committedTxnId = 2;
private long committedTxnId_ ;
/**
* optional uint64 committedTxnId = 2;
*/
public boolean hasCommittedTxnId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 committedTxnId = 2;
*/
public long getCommittedTxnId() {
return committedTxnId_;
}
/**
* optional uint64 committedTxnId = 2;
*/
public Builder setCommittedTxnId(long value) {
bitField0_ |= 0x00000002;
committedTxnId_ = value;
onChanged();
return this;
}
/**
* optional uint64 committedTxnId = 2;
*/
public Builder clearCommittedTxnId() {
bitField0_ = (bitField0_ & ~0x00000002);
committedTxnId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteEditLogManifestProto)
}
static {
defaultInstance = new RemoteEditLogManifestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteEditLogManifestProto)
}
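// Editor's note: the sketch below is illustrative only and not part of the
// generated API surface. It shows one way a caller might assemble a
// RemoteEditLogManifestProto via the builders above, assuming
// RemoteEditLogProto exposes setStartTxId/setEndTxId as in the
// HdfsServer.proto definition this file was generated from.
private static RemoteEditLogManifestProto buildExampleManifest() {
  RemoteEditLogProto segment = RemoteEditLogProto.newBuilder()
      .setStartTxId(1L)    // first txid covered by this edit-log segment
      .setEndTxId(100L)    // last txid covered by this edit-log segment
      .build();
  return RemoteEditLogManifestProto.newBuilder()
      .addLogs(segment)            // repeated field: one entry per segment
      .setCommittedTxnId(100L)     // optional uint64 committedTxnId = 2
      .build();                    // build() fails if any log is uninitialized
}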
public interface NamespaceInfoProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required string buildVersion = 1;
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
boolean hasBuildVersion();
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
java.lang.String getBuildVersion();
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBuildVersionBytes();
// required uint32 unused = 2;
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
boolean hasUnused();
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
int getUnused();
// required string blockPoolID = 3;
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
boolean hasBlockPoolID();
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
java.lang.String getBlockPoolID();
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBlockPoolIDBytes();
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
boolean hasStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
// required string softwareVersion = 5;
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
boolean hasSoftwareVersion();
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
java.lang.String getSoftwareVersion();
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getSoftwareVersionBytes();
// optional uint64 capabilities = 6 [default = 0];
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
boolean hasCapabilities();
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
long getCapabilities();
// optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
boolean hasState();
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State getState();
}
/**
 * Protobuf type {@code hadoop.hdfs.NamespaceInfoProto}
 *
 * Namespace information that describes a namespace on a namenode
 */
public static final class NamespaceInfoProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements NamespaceInfoProtoOrBuilder {
// Use NamespaceInfoProto.newBuilder() to construct.
private NamespaceInfoProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private NamespaceInfoProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final NamespaceInfoProto defaultInstance;
public static NamespaceInfoProto getDefaultInstance() {
return defaultInstance;
}
public NamespaceInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private NamespaceInfoProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
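// Each tag encodes (field_number << 3) | wire_type: case 10 is field 1
// (buildVersion, length-delimited), 16 is field 2 (unused, varint), 26 is
// field 3, 34 is field 4 (storageInfo, message), 42 is field 5, 48 is
// field 6, and 56 is field 7 (state, enum varint).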
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
buildVersion_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
unused_ = input.readUInt32();
break;
}
case 26: {
bitField0_ |= 0x00000004;
blockPoolID_ = input.readBytes();
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = storageInfo_.toBuilder();
}
storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storageInfo_);
storageInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
bitField0_ |= 0x00000010;
softwareVersion_ = input.readBytes();
break;
}
case 48: {
bitField0_ |= 0x00000020;
capabilities_ = input.readUInt64();
break;
}
case 56: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State value = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(7, rawValue);
} else {
bitField0_ |= 0x00000040;
state_ = value;
}
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NamespaceInfoProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<NamespaceInfoProto>() {
public NamespaceInfoProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new NamespaceInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NamespaceInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string buildVersion = 1;
public static final int BUILDVERSION_FIELD_NUMBER = 1;
private java.lang.Object buildVersion_;
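// String fields are held as java.lang.Object so the raw ByteString read off
// the wire can be kept until first access; getBuildVersion() decodes lazily
// and caches the String only when the bytes are valid UTF-8.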
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public boolean hasBuildVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public java.lang.String getBuildVersion() {
java.lang.Object ref = buildVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
buildVersion_ = s;
}
return s;
}
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBuildVersionBytes() {
java.lang.Object ref = buildVersion_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
buildVersion_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// required uint32 unused = 2;
public static final int UNUSED_FIELD_NUMBER = 2;
private int unused_;
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
public boolean hasUnused() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
public int getUnused() {
return unused_;
}
// required string blockPoolID = 3;
public static final int BLOCKPOOLID_FIELD_NUMBER = 3;
private java.lang.Object blockPoolID_;
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public boolean hasBlockPoolID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public java.lang.String getBlockPoolID() {
java.lang.Object ref = blockPoolID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolID_ = s;
}
return s;
}
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBlockPoolIDBytes() {
java.lang.Object ref = blockPoolID_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolID_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
public static final int STORAGEINFO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
// required string softwareVersion = 5;
public static final int SOFTWAREVERSION_FIELD_NUMBER = 5;
private java.lang.Object softwareVersion_;
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
softwareVersion_ = s;
}
return s;
}
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// optional uint64 capabilities = 6 [default = 0];
public static final int CAPABILITIES_FIELD_NUMBER = 6;
private long capabilities_;
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
public boolean hasCapabilities() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
public long getCapabilities() {
return capabilities_;
}
// optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
public static final int STATE_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State state_;
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State getState() {
return state_;
}
private void initFields() {
buildVersion_ = "";
unused_ = 0;
blockPoolID_ = "";
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
softwareVersion_ = "";
capabilities_ = 0L;
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBuildVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUnused()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSoftwareVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBuildVersionBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, unused_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getBlockPoolIDBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, storageInfo_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getSoftwareVersionBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, capabilities_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeEnum(7, state_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBuildVersionBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, unused_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getBlockPoolIDBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(4, storageInfo_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getSoftwareVersionBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, capabilities_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeEnumSize(7, state_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto) obj;
boolean result = true;
result = result && (hasBuildVersion() == other.hasBuildVersion());
if (hasBuildVersion()) {
result = result && getBuildVersion()
.equals(other.getBuildVersion());
}
result = result && (hasUnused() == other.hasUnused());
if (hasUnused()) {
result = result && (getUnused()
== other.getUnused());
}
result = result && (hasBlockPoolID() == other.hasBlockPoolID());
if (hasBlockPoolID()) {
result = result && getBlockPoolID()
.equals(other.getBlockPoolID());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result && (hasSoftwareVersion() == other.hasSoftwareVersion());
if (hasSoftwareVersion()) {
result = result && getSoftwareVersion()
.equals(other.getSoftwareVersion());
}
result = result && (hasCapabilities() == other.hasCapabilities());
if (hasCapabilities()) {
result = result && (getCapabilities()
== other.getCapabilities());
}
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBuildVersion()) {
hash = (37 * hash) + BUILDVERSION_FIELD_NUMBER;
hash = (53 * hash) + getBuildVersion().hashCode();
}
if (hasUnused()) {
hash = (37 * hash) + UNUSED_FIELD_NUMBER;
hash = (53 * hash) + getUnused();
}
if (hasBlockPoolID()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolID().hashCode();
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
if (hasSoftwareVersion()) {
hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
hash = (53 * hash) + getSoftwareVersion().hashCode();
}
if (hasCapabilities()) {
hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCapabilities());
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.NamespaceInfoProto}
 *
 * Namespace information that describes a namespace on a namenode
 */
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
buildVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
unused_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolID_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
softwareVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
capabilities_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.buildVersion_ = buildVersion_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.unused_ = unused_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.blockPoolID_ = blockPoolID_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.softwareVersion_ = softwareVersion_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.capabilities_ = capabilities_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.state_ = state_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance()) return this;
if (other.hasBuildVersion()) {
bitField0_ |= 0x00000001;
buildVersion_ = other.buildVersion_;
onChanged();
}
if (other.hasUnused()) {
setUnused(other.getUnused());
}
if (other.hasBlockPoolID()) {
bitField0_ |= 0x00000004;
blockPoolID_ = other.blockPoolID_;
onChanged();
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
if (other.hasSoftwareVersion()) {
bitField0_ |= 0x00000010;
softwareVersion_ = other.softwareVersion_;
onChanged();
}
if (other.hasCapabilities()) {
setCapabilities(other.getCapabilities());
}
if (other.hasState()) {
setState(other.getState());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBuildVersion()) {
return false;
}
if (!hasUnused()) {
return false;
}
if (!hasBlockPoolID()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!hasSoftwareVersion()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string buildVersion = 1;
private java.lang.Object buildVersion_ = "";
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public boolean hasBuildVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public java.lang.String getBuildVersion() {
java.lang.Object ref = buildVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
buildVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBuildVersionBytes() {
java.lang.Object ref = buildVersion_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
buildVersion_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public Builder setBuildVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
buildVersion_ = value;
onChanged();
return this;
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public Builder clearBuildVersion() {
bitField0_ = (bitField0_ & ~0x00000001);
buildVersion_ = getDefaultInstance().getBuildVersion();
onChanged();
return this;
}
/**
* required string buildVersion = 1;
*
*
* Software revision version (e.g. an svn or git revision)
*
*/
public Builder setBuildVersionBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
buildVersion_ = value;
onChanged();
return this;
}
// required uint32 unused = 2;
private int unused_ ;
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
public boolean hasUnused() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
public int getUnused() {
return unused_;
}
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
public Builder setUnused(int value) {
bitField0_ |= 0x00000002;
unused_ = value;
onChanged();
return this;
}
/**
* required uint32 unused = 2;
*
*
* Retained for backward compatibility
*
*/
public Builder clearUnused() {
bitField0_ = (bitField0_ & ~0x00000002);
unused_ = 0;
onChanged();
return this;
}
// required string blockPoolID = 3;
private java.lang.Object blockPoolID_ = "";
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public boolean hasBlockPoolID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public java.lang.String getBlockPoolID() {
java.lang.Object ref = blockPoolID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBlockPoolIDBytes() {
java.lang.Object ref = blockPoolID_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolID_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public Builder setBlockPoolID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
blockPoolID_ = value;
onChanged();
return this;
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public Builder clearBlockPoolID() {
bitField0_ = (bitField0_ & ~0x00000004);
blockPoolID_ = getDefaultInstance().getBlockPoolID();
onChanged();
return this;
}
/**
* required string blockPoolID = 3;
*
*
* block pool used by the namespace
*
*/
public Builder setBlockPoolIDBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
blockPoolID_ = value;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
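// If storageInfo is already set, merge the incoming message field-by-field
// into the existing value; otherwise adopt the incoming message wholesale.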
if (((bitField0_ & 0x00000008) == 0x00000008) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*
*
* Node information
*
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
// required string softwareVersion = 5;
private java.lang.Object softwareVersion_ = "";
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
softwareVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public Builder setSoftwareVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
softwareVersion_ = value;
onChanged();
return this;
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public Builder clearSoftwareVersion() {
bitField0_ = (bitField0_ & ~0x00000010);
softwareVersion_ = getDefaultInstance().getSoftwareVersion();
onChanged();
return this;
}
/**
* required string softwareVersion = 5;
*
*
* Software version number (e.g. 2.0.0)
*
*/
public Builder setSoftwareVersionBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
softwareVersion_ = value;
onChanged();
return this;
}
// optional uint64 capabilities = 6 [default = 0];
private long capabilities_ ;
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
public boolean hasCapabilities() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
public long getCapabilities() {
return capabilities_;
}
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
public Builder setCapabilities(long value) {
bitField0_ |= 0x00000020;
capabilities_ = value;
onChanged();
return this;
}
/**
* optional uint64 capabilities = 6 [default = 0];
*
*
* feature flags
*
*/
public Builder clearCapabilities() {
bitField0_ = (bitField0_ & ~0x00000020);
capabilities_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State getState() {
return state_;
}
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000040;
state_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 7;
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000040);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamespaceInfoProto)
}
static {
defaultInstance = new NamespaceInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.NamespaceInfoProto)
}
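// Editor's note: hedged round-trip sketch, illustrative only and not part of
// the generated code. It builds a NamespaceInfoProto, serializes it, and
// parses it back through the static parseFrom above. The StorageInfoProto
// setters used here (setLayoutVersion, setNamespceID, setClusterID, setCTime)
// are an assumption, taken from the HdfsServer.proto definition; "namespceID"
// is spelled that way in the proto itself.
private static NamespaceInfoProto namespaceInfoRoundTrip()
    throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
  StorageInfoProto storage = StorageInfoProto.newBuilder()
      .setLayoutVersion(-63)       // HDFS layout versions are negative (e.g. -63)
      .setNamespceID(12345)
      .setClusterID("example-cluster")
      .setCTime(0L)
      .build();
  NamespaceInfoProto info = NamespaceInfoProto.newBuilder()
      .setBuildVersion("deadbeef")   // required string buildVersion = 1
      .setUnused(0)                  // required uint32 unused = 2
      .setBlockPoolID("BP-1")        // required string blockPoolID = 3
      .setStorageInfo(storage)       // required StorageInfoProto storageInfo = 4
      .setSoftwareVersion("2.0.0")   // required string softwareVersion = 5
      .build();                      // throws if any required field is unset
  return NamespaceInfoProto.parseFrom(info.toByteArray());
}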
public interface RecoveringBlockProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required uint64 newGenStamp = 1;
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
boolean hasNewGenStamp();
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
long getNewGenStamp();
// required .hadoop.hdfs.LocatedBlockProto block = 2;
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock();
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder();
// optional .hadoop.hdfs.BlockProto truncateBlock = 3;
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
boolean hasTruncateBlock();
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getTruncateBlock();
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getTruncateBlockOrBuilder();
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
boolean hasEcPolicy();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
// optional bytes blockIndices = 5;
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
boolean hasBlockIndices();
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getBlockIndices();
}
/**
 * Protobuf type {@code hadoop.hdfs.RecoveringBlockProto}
 *
 * Block that needs to be recovered at a given location
 */
public static final class RecoveringBlockProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements RecoveringBlockProtoOrBuilder {
// Use RecoveringBlockProto.newBuilder() to construct.
private RecoveringBlockProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RecoveringBlockProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RecoveringBlockProto defaultInstance;
public static RecoveringBlockProto getDefaultInstance() {
return defaultInstance;
}
public RecoveringBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RecoveringBlockProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
newGenStamp_ = input.readUInt64();
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = truncateBlock_.toBuilder();
}
truncateBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(truncateBlock_);
truncateBlock_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = ecPolicy_.toBuilder();
}
ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(ecPolicy_);
ecPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
bitField0_ |= 0x00000010;
blockIndices_ = input.readBytes();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RecoveringBlockProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<RecoveringBlockProto>() {
public RecoveringBlockProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new RecoveringBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RecoveringBlockProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 newGenStamp = 1;
public static final int NEWGENSTAMP_FIELD_NUMBER = 1;
private long newGenStamp_;
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
public boolean hasNewGenStamp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
public long getNewGenStamp() {
return newGenStamp_;
}
// required .hadoop.hdfs.LocatedBlockProto block = 2;
public static final int BLOCK_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_;
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// optional .hadoop.hdfs.BlockProto truncateBlock = 3;
public static final int TRUNCATEBLOCK_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto truncateBlock_;
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public boolean hasTruncateBlock() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getTruncateBlock() {
return truncateBlock_;
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getTruncateBlockOrBuilder() {
return truncateBlock_;
}
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
public static final int ECPOLICY_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
return ecPolicy_;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
return ecPolicy_;
}
// optional bytes blockIndices = 5;
public static final int BLOCKINDICES_FIELD_NUMBER = 5;
private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString blockIndices_;
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
public boolean hasBlockIndices() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getBlockIndices() {
return blockIndices_;
}
private void initFields() {
newGenStamp_ = 0L;
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
blockIndices_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasNewGenStamp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasTruncateBlock()) {
if (!getTruncateBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasEcPolicy()) {
if (!getEcPolicy().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, newGenStamp_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, block_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, truncateBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, ecPolicy_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, blockIndices_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, newGenStamp_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(2, block_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(3, truncateBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(4, ecPolicy_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(5, blockIndices_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto) obj;
boolean result = true;
result = result && (hasNewGenStamp() == other.hasNewGenStamp());
if (hasNewGenStamp()) {
result = result && (getNewGenStamp()
== other.getNewGenStamp());
}
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasTruncateBlock() == other.hasTruncateBlock());
if (hasTruncateBlock()) {
result = result && getTruncateBlock()
.equals(other.getTruncateBlock());
}
result = result && (hasEcPolicy() == other.hasEcPolicy());
if (hasEcPolicy()) {
result = result && getEcPolicy()
.equals(other.getEcPolicy());
}
result = result && (hasBlockIndices() == other.hasBlockIndices());
if (hasBlockIndices()) {
result = result && getBlockIndices()
.equals(other.getBlockIndices());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasNewGenStamp()) {
hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNewGenStamp());
}
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasTruncateBlock()) {
hash = (37 * hash) + TRUNCATEBLOCK_FIELD_NUMBER;
hash = (53 * hash) + getTruncateBlock().hashCode();
}
if (hasEcPolicy()) {
hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getEcPolicy().hashCode();
}
if (hasBlockIndices()) {
hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER;
hash = (53 * hash) + getBlockIndices().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.RecoveringBlockProto}
*
*
**
* Block that needs to be recovered at a given location
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getTruncateBlockFieldBuilder();
getEcPolicyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
newGenStamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (truncateBlockBuilder_ == null) {
truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
} else {
truncateBlockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
blockIndices_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.newGenStamp_ = newGenStamp_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (truncateBlockBuilder_ == null) {
result.truncateBlock_ = truncateBlock_;
} else {
result.truncateBlock_ = truncateBlockBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (ecPolicyBuilder_ == null) {
result.ecPolicy_ = ecPolicy_;
} else {
result.ecPolicy_ = ecPolicyBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.blockIndices_ = blockIndices_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.getDefaultInstance()) return this;
if (other.hasNewGenStamp()) {
setNewGenStamp(other.getNewGenStamp());
}
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasTruncateBlock()) {
mergeTruncateBlock(other.getTruncateBlock());
}
if (other.hasEcPolicy()) {
mergeEcPolicy(other.getEcPolicy());
}
if (other.hasBlockIndices()) {
setBlockIndices(other.getBlockIndices());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
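// Merge semantics above follow the standard protobuf rules: scalar and bytes
// fields (newGenStamp, blockIndices) are overwritten by the incoming value,
// while message fields (block, truncateBlock, ecPolicy) are merged
// field-by-field via the corresponding merge* methods.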
public final boolean isInitialized() {
if (!hasNewGenStamp()) {
return false;
}
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
if (hasTruncateBlock()) {
if (!getTruncateBlock().isInitialized()) {
return false;
}
}
if (hasEcPolicy()) {
if (!getEcPolicy().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 newGenStamp = 1;
private long newGenStamp_ ;
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
public boolean hasNewGenStamp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
public long getNewGenStamp() {
return newGenStamp_;
}
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
public Builder setNewGenStamp(long value) {
bitField0_ |= 0x00000001;
newGenStamp_ = value;
onChanged();
return this;
}
/**
* required uint64 newGenStamp = 1;
*
*
* New genstamp post recovery
*
*/
public Builder clearNewGenStamp() {
bitField0_ = (bitField0_ & ~0x00000001);
newGenStamp_ = 0L;
onChanged();
return this;
}
// required .hadoop.hdfs.LocatedBlockProto block = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.LocatedBlockProto block = 2;
*
*
* Block to be recovered
*
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// optional .hadoop.hdfs.BlockProto truncateBlock = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> truncateBlockBuilder_;
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public boolean hasTruncateBlock() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getTruncateBlock() {
if (truncateBlockBuilder_ == null) {
return truncateBlock_;
} else {
return truncateBlockBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public Builder setTruncateBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (truncateBlockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
truncateBlock_ = value;
onChanged();
} else {
truncateBlockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public Builder setTruncateBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (truncateBlockBuilder_ == null) {
truncateBlock_ = builderForValue.build();
onChanged();
} else {
truncateBlockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public Builder mergeTruncateBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (truncateBlockBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
truncateBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
truncateBlock_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(truncateBlock_).mergeFrom(value).buildPartial();
} else {
truncateBlock_ = value;
}
onChanged();
} else {
truncateBlockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public Builder clearTruncateBlock() {
if (truncateBlockBuilder_ == null) {
truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
onChanged();
} else {
truncateBlockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getTruncateBlockBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getTruncateBlockFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getTruncateBlockOrBuilder() {
if (truncateBlockBuilder_ != null) {
return truncateBlockBuilder_.getMessageOrBuilder();
} else {
return truncateBlock_;
}
}
/**
* optional .hadoop.hdfs.BlockProto truncateBlock = 3;
*
*
* New block for recovery (truncate)
*
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getTruncateBlockFieldBuilder() {
if (truncateBlockBuilder_ == null) {
truncateBlockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
truncateBlock_,
getParentForChildren(),
isClean());
truncateBlock_ = null;
}
return truncateBlockBuilder_;
}
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
if (ecPolicyBuilder_ == null) {
return ecPolicy_;
} else {
return ecPolicyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ecPolicy_ = value;
onChanged();
} else {
ecPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = builderForValue.build();
onChanged();
} else {
ecPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
ecPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial();
} else {
ecPolicy_ = value;
}
onChanged();
} else {
ecPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder clearEcPolicy() {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
onChanged();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getEcPolicyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
if (ecPolicyBuilder_ != null) {
return ecPolicyBuilder_.getMessageOrBuilder();
} else {
return ecPolicy_;
}
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
getEcPolicyFieldBuilder() {
if (ecPolicyBuilder_ == null) {
ecPolicyBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
ecPolicy_,
getParentForChildren(),
isClean());
ecPolicy_ = null;
}
return ecPolicyBuilder_;
}
// optional bytes blockIndices = 5;
private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString blockIndices_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
public boolean hasBlockIndices() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getBlockIndices() {
return blockIndices_;
}
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
public Builder setBlockIndices(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
blockIndices_ = value;
onChanged();
return this;
}
/**
* optional bytes blockIndices = 5;
*
*
* block indices of striped internal blocks for each storage in LocatedBlock
*
*/
public Builder clearBlockIndices() {
bitField0_ = (bitField0_ & ~0x00000010);
blockIndices_ = getDefaultInstance().getBlockIndices();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RecoveringBlockProto)
}
static {
defaultInstance = new RecoveringBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RecoveringBlockProto)
}
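// Illustrative usage sketch (not part of the generated API surface): the
// message is immutable, so instances are created through the Builder and
// round-trip through the standard protobuf byte representation. The
// locatedBlock value below is assumed to be supplied by the caller.
//
//   HdfsServerProtos.RecoveringBlockProto rb =
//       HdfsServerProtos.RecoveringBlockProto.newBuilder()
//           .setNewGenStamp(1001L)
//           .setBlock(locatedBlock)   // required LocatedBlockProto
//           .build();                 // throws if a required field is unset
//   byte[] bytes = rb.toByteArray();
//   HdfsServerProtos.RecoveringBlockProto parsed =
//       HdfsServerProtos.RecoveringBlockProto.parseFrom(bytes);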
public interface CheckpointSignatureProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required string blockPoolId = 1;
/**
* required string blockPoolId = 1;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 1;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 1;
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBlockPoolIdBytes();
// required uint64 mostRecentCheckpointTxId = 2;
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
boolean hasMostRecentCheckpointTxId();
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
long getMostRecentCheckpointTxId();
// required uint64 curSegmentTxId = 3;
/**
* required uint64 curSegmentTxId = 3;
*/
boolean hasCurSegmentTxId();
/**
* required uint64 curSegmentTxId = 3;
*/
long getCurSegmentTxId();
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
boolean hasStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
}
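// The ...OrBuilder interface above is the read-only view shared by the
// message and its Builder; a hypothetical helper that only reads fields can
// accept either one, e.g.:
//
//   static long uncheckpointedTxns(
//       HdfsServerProtos.CheckpointSignatureProtoOrBuilder sig) {
//     return sig.getCurSegmentTxId() - sig.getMostRecentCheckpointTxId();
//   }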
/**
* Protobuf type {@code hadoop.hdfs.CheckpointSignatureProto}
*
*
**
* Unique signature to identify checkpoint transactions.
*
*/
public static final class CheckpointSignatureProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements CheckpointSignatureProtoOrBuilder {
// Use CheckpointSignatureProto.newBuilder() to construct.
private CheckpointSignatureProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CheckpointSignatureProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CheckpointSignatureProto defaultInstance;
public static CheckpointSignatureProto getDefaultInstance() {
return defaultInstance;
}
public CheckpointSignatureProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CheckpointSignatureProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
blockPoolId_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
mostRecentCheckpointTxId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
curSegmentTxId_ = input.readUInt64();
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = storageInfo_.toBuilder();
}
storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storageInfo_);
storageInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CheckpointSignatureProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<CheckpointSignatureProto>() {
public CheckpointSignatureProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new CheckpointSignatureProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CheckpointSignatureProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string blockPoolId = 1;
public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 1;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 1;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 1;
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// required uint64 mostRecentCheckpointTxId = 2;
public static final int MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER = 2;
private long mostRecentCheckpointTxId_;
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public boolean hasMostRecentCheckpointTxId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public long getMostRecentCheckpointTxId() {
return mostRecentCheckpointTxId_;
}
// required uint64 curSegmentTxId = 3;
public static final int CURSEGMENTTXID_FIELD_NUMBER = 3;
private long curSegmentTxId_;
/**
* required uint64 curSegmentTxId = 3;
*/
public boolean hasCurSegmentTxId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 curSegmentTxId = 3;
*/
public long getCurSegmentTxId() {
return curSegmentTxId_;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
public static final int STORAGEINFO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
private void initFields() {
blockPoolId_ = "";
mostRecentCheckpointTxId_ = 0L;
curSegmentTxId_ = 0L;
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMostRecentCheckpointTxId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCurSegmentTxId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, mostRecentCheckpointTxId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, curSegmentTxId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, storageInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, mostRecentCheckpointTxId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, curSegmentTxId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(4, storageInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto) obj;
boolean result = true;
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && (hasMostRecentCheckpointTxId() == other.hasMostRecentCheckpointTxId());
if (hasMostRecentCheckpointTxId()) {
result = result && (getMostRecentCheckpointTxId()
== other.getMostRecentCheckpointTxId());
}
result = result && (hasCurSegmentTxId() == other.hasCurSegmentTxId());
if (hasCurSegmentTxId()) {
result = result && (getCurSegmentTxId()
== other.getCurSegmentTxId());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (hasMostRecentCheckpointTxId()) {
hash = (37 * hash) + MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMostRecentCheckpointTxId());
}
if (hasCurSegmentTxId()) {
hash = (37 * hash) + CURSEGMENTTXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCurSegmentTxId());
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CheckpointSignatureProto}
*
*
**
* Unique signature to identify checkpoint transactions.
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
mostRecentCheckpointTxId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
curSegmentTxId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockPoolId_ = blockPoolId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.mostRecentCheckpointTxId_ = mostRecentCheckpointTxId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.curSegmentTxId_ = curSegmentTxId_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance()) return this;
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000001;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (other.hasMostRecentCheckpointTxId()) {
setMostRecentCheckpointTxId(other.getMostRecentCheckpointTxId());
}
if (other.hasCurSegmentTxId()) {
setCurSegmentTxId(other.getCurSegmentTxId());
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPoolId()) {
return false;
}
if (!hasMostRecentCheckpointTxId()) {
return false;
}
if (!hasCurSegmentTxId()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
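// Note (editorial): the finally block above merges whatever was parsed before
// a failure, so a mergeFrom that throws InvalidProtocolBufferException can
// still leave this builder partially updated; the same partial message is
// available via e.getUnfinishedMessage().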
private int bitField0_;
// required string blockPoolId = 1;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 1;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 1;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 1;
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
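// Note (editorial): blockPoolId_ holds either a java.lang.String or a
// ByteString. getBlockPoolId() and getBlockPoolIdBytes() each convert lazily
// on first access and cache the converted form back into the field, so
// repeated calls in either representation are cheap.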
/**
* required string blockPoolId = 1;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 1;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 1;
*/
public Builder setBlockPoolIdBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
// required uint64 mostRecentCheckpointTxId = 2;
private long mostRecentCheckpointTxId_ ;
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public boolean hasMostRecentCheckpointTxId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public long getMostRecentCheckpointTxId() {
return mostRecentCheckpointTxId_;
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public Builder setMostRecentCheckpointTxId(long value) {
bitField0_ |= 0x00000002;
mostRecentCheckpointTxId_ = value;
onChanged();
return this;
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public Builder clearMostRecentCheckpointTxId() {
bitField0_ = (bitField0_ & ~0x00000002);
mostRecentCheckpointTxId_ = 0L;
onChanged();
return this;
}
// required uint64 curSegmentTxId = 3;
private long curSegmentTxId_ ;
/**
* required uint64 curSegmentTxId = 3;
*/
public boolean hasCurSegmentTxId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 curSegmentTxId = 3;
*/
public long getCurSegmentTxId() {
return curSegmentTxId_;
}
/**
* required uint64 curSegmentTxId = 3;
*/
public Builder setCurSegmentTxId(long value) {
bitField0_ |= 0x00000004;
curSegmentTxId_ = value;
onChanged();
return this;
}
/**
* required uint64 curSegmentTxId = 3;
*/
public Builder clearCurSegmentTxId() {
bitField0_ = (bitField0_ & ~0x00000004);
curSegmentTxId_ = 0L;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
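// Merge-semantics sketch (editorial, illustrative only): for a singular
// message field, merging into an already-set, non-default value overlays the
// incoming message field-by-field; otherwise the incoming value replaces the
// field outright.
//
//   builder.setStorageInfo(a);    // field is now a
//   builder.mergeStorageInfo(b);  // field is now a overlaid with b's set fields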
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckpointSignatureProto)
}
static {
defaultInstance = new CheckpointSignatureProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckpointSignatureProto)
}
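// Illustrative sketch (editorial, not generated code): building a
// CheckpointSignatureProto. The storageInfo value is assumed to be a
// fully-populated StorageInfoProto, and the block pool id is a made-up
// example; build() throws if any required field is unset.
//
//   CheckpointSignatureProto sig = CheckpointSignatureProto.newBuilder()
//       .setBlockPoolId("BP-1-127.0.0.1-1400000000000")  // example id
//       .setMostRecentCheckpointTxId(100L)
//       .setCurSegmentTxId(101L)
//       .setStorageInfo(storageInfo)
//       .build();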
public interface CheckpointCommandProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
boolean hasSignature();
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature();
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder();
// required bool needToReturnImage = 2;
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
boolean hasNeedToReturnImage();
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
boolean getNeedToReturnImage();
}
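// Note (editorial): both CheckpointCommandProto and its Builder implement
// this interface, so callers can read signature and needToReturnImage without
// caring whether they hold a built message or a builder under construction.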
/**
* Protobuf type {@code hadoop.hdfs.CheckpointCommandProto}
*
*
**
* Command returned from the primary to the checkpointing namenode.
* This command carries the checkpoint signature that identifies the
* checkpoint transaction and is needed for further communication
* related to checkpointing.
*
*/
public static final class CheckpointCommandProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements CheckpointCommandProtoOrBuilder {
// Use CheckpointCommandProto.newBuilder() to construct.
private CheckpointCommandProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CheckpointCommandProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CheckpointCommandProto defaultInstance;
public static CheckpointCommandProto getDefaultInstance() {
return defaultInstance;
}
public CheckpointCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CheckpointCommandProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = signature_.toBuilder();
}
signature_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(signature_);
signature_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
needToReturnImage_ = input.readBool();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
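// Wire-format note (editorial): each case label above is a protobuf tag,
// computed as (fieldNumber << 3) | wireType. For this message:
//   case 10 == (1 << 3) | 2  -> field 1 (signature), length-delimited
//   case 16 == (2 << 3) | 0  -> field 2 (needToReturnImage), varint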
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CheckpointCommandProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<CheckpointCommandProto>() {
public CheckpointCommandProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new CheckpointCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CheckpointCommandProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
public static final int SIGNATURE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto signature_;
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public boolean hasSignature() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature() {
return signature_;
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() {
return signature_;
}
// required bool needToReturnImage = 2;
public static final int NEEDTORETURNIMAGE_FIELD_NUMBER = 2;
private boolean needToReturnImage_;
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
public boolean hasNeedToReturnImage() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
public boolean getNeedToReturnImage() {
return needToReturnImage_;
}
private void initFields() {
signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance();
needToReturnImage_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSignature()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNeedToReturnImage()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSignature().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, signature_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, needToReturnImage_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, signature_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBoolSize(2, needToReturnImage_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto) obj;
boolean result = true;
result = result && (hasSignature() == other.hasSignature());
if (hasSignature()) {
result = result && getSignature()
.equals(other.getSignature());
}
result = result && (hasNeedToReturnImage() == other.hasNeedToReturnImage());
if (hasNeedToReturnImage()) {
result = result && (getNeedToReturnImage()
== other.getNeedToReturnImage());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSignature()) {
hash = (37 * hash) + SIGNATURE_FIELD_NUMBER;
hash = (53 * hash) + getSignature().hashCode();
}
if (hasNeedToReturnImage()) {
hash = (37 * hash) + NEEDTORETURNIMAGE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getNeedToReturnImage());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CheckpointCommandProto}
*
*
**
* Command returned from the primary to the checkpointing namenode.
* This command carries the checkpoint signature that identifies the
* checkpoint transaction and is needed for further communication
* related to checkpointing.
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getSignatureFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (signatureBuilder_ == null) {
signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance();
} else {
signatureBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
needToReturnImage_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (signatureBuilder_ == null) {
result.signature_ = signature_;
} else {
result.signature_ = signatureBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.needToReturnImage_ = needToReturnImage_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance()) return this;
if (other.hasSignature()) {
mergeSignature(other.getSignature());
}
if (other.hasNeedToReturnImage()) {
setNeedToReturnImage(other.getNeedToReturnImage());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSignature()) {
return false;
}
if (!hasNeedToReturnImage()) {
return false;
}
if (!getSignature().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_;
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public boolean hasSignature() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto getSignature() {
if (signatureBuilder_ == null) {
return signature_;
} else {
return signatureBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto value) {
if (signatureBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
signature_ = value;
onChanged();
} else {
signatureBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public Builder setSignature(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder builderForValue) {
if (signatureBuilder_ == null) {
signature_ = builderForValue.build();
onChanged();
} else {
signatureBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto value) {
if (signatureBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance()) {
signature_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial();
} else {
signature_ = value;
}
onChanged();
} else {
signatureBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public Builder clearSignature() {
if (signatureBuilder_ == null) {
signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.getDefaultInstance();
onChanged();
} else {
signatureBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder getSignatureBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getSignatureFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() {
if (signatureBuilder_ != null) {
return signatureBuilder_.getMessageOrBuilder();
} else {
return signature_;
}
}
/**
* required .hadoop.hdfs.CheckpointSignatureProto signature = 1;
*
*
* Unique signature to identify checkpoint transaction
*
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder>
getSignatureFieldBuilder() {
if (signatureBuilder_ == null) {
signatureBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProtoOrBuilder>(
signature_,
getParentForChildren(),
isClean());
signature_ = null;
}
return signatureBuilder_;
}
// required bool needToReturnImage = 2;
private boolean needToReturnImage_ ;
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
public boolean hasNeedToReturnImage() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
public boolean getNeedToReturnImage() {
return needToReturnImage_;
}
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
public Builder setNeedToReturnImage(boolean value) {
bitField0_ |= 0x00000002;
needToReturnImage_ = value;
onChanged();
return this;
}
/**
* required bool needToReturnImage = 2;
*
*
* If true, transfer the image back to the primary upon completion of the checkpoint
*
*/
public Builder clearNeedToReturnImage() {
bitField0_ = (bitField0_ & ~0x00000002);
needToReturnImage_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckpointCommandProto)
}
static {
defaultInstance = new CheckpointCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckpointCommandProto)
}
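// Illustrative round-trip sketch (editorial, not generated code): serialize a
// CheckpointCommandProto and parse it back; sig is assumed to be a
// fully-initialized CheckpointSignatureProto.
//
//   CheckpointCommandProto cmd = CheckpointCommandProto.newBuilder()
//       .setSignature(sig)
//       .setNeedToReturnImage(true)
//       .build();
//   byte[] bytes = cmd.toByteArray();
//   CheckpointCommandProto parsed = CheckpointCommandProto.parseFrom(bytes);
//   // equals() and hashCode() are value-based, so parsed.equals(cmd) holds.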
public interface NamenodeCommandProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required uint32 action = 1;
/**
* required uint32 action = 1;
*/
boolean hasAction();
/**
* required uint32 action = 1;
*/
int getAction();
// required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
boolean hasType();
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type getType();
// optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
boolean hasCheckpointCmd();
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto getCheckpointCmd();
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.NamenodeCommandProto}
*
*
**
* Command sent from one namenode to another namenode.
*
*/
public static final class NamenodeCommandProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements NamenodeCommandProtoOrBuilder {
// Use NamenodeCommandProto.newBuilder() to construct.
private NamenodeCommandProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private NamenodeCommandProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final NamenodeCommandProto defaultInstance;
public static NamenodeCommandProto getDefaultInstance() {
return defaultInstance;
}
public NamenodeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private NamenodeCommandProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
action_ = input.readUInt32();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
type_ = value;
}
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = checkpointCmd_.toBuilder();
}
checkpointCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(checkpointCmd_);
checkpointCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NamenodeCommandProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<NamenodeCommandProto>() {
public NamenodeCommandProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new NamenodeCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NamenodeCommandProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.NamenodeCommandProto.Type}
*/
public enum Type
implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum {
/**
* NamenodeCommand = 0;
*
*
* Base command
*
*/
NamenodeCommand(0, 0),
/**
* CheckPointCommand = 1;
*
*
* Checkpoint command
*
*/
CheckPointCommand(1, 1),
;
/**
* NamenodeCommand = 0;
*
*
* Base command
*
*/
public static final int NamenodeCommand_VALUE = 0;
/**
* CheckPointCommand = 1;
*
*
* Checkpoint command
*
*/
public static final int CheckPointCommand_VALUE = 1;
public final int getNumber() { return value; }
public static Type valueOf(int value) {
switch (value) {
case 0: return NamenodeCommand;
case 1: return CheckPointCommand;
default: return null;
}
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<Type>
internalGetValueMap() {
return internalValueMap;
}
private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<Type>
internalValueMap =
new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<Type>() {
public Type findValueByNumber(int number) {
return Type.valueOf(number);
}
};
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Type[] VALUES = values();
public static Type valueOf(
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Type(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.NamenodeCommandProto.Type)
}
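// Enum-handling note (editorial): Type.valueOf(int) returns null for numbers
// not defined above, e.g. Type.valueOf(1) == Type.CheckPointCommand but
// Type.valueOf(99) == null. The parsing constructor (case 16) relies on this
// to route unknown enum numbers into unknownFields instead of failing.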
private int bitField0_;
// required uint32 action = 1;
public static final int ACTION_FIELD_NUMBER = 1;
private int action_;
/**
* required uint32 action = 1;
*/
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 action = 1;
*/
public int getAction() {
return action_;
}
// required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
public static final int TYPE_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type type_;
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type getType() {
return type_;
}
// optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
public static final int CHECKPOINTCMD_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto checkpointCmd_;
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public boolean hasCheckpointCmd() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto getCheckpointCmd() {
return checkpointCmd_;
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder() {
return checkpointCmd_;
}
private void initFields() {
action_ = 0;
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type.NamenodeCommand;
checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasAction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasType()) {
memoizedIsInitialized = 0;
return false;
}
if (hasCheckpointCmd()) {
if (!getCheckpointCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
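// Initialization sketch (editorial, illustrative only): action and type are
// required, while checkpointCmd is optional and only validated when present.
//
//   NamenodeCommandProto ok = NamenodeCommandProto.newBuilder()
//       .setAction(1)  // example value
//       .setType(NamenodeCommandProto.Type.NamenodeCommand)
//       .build();      // succeeds even though checkpointCmd is unset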
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, action_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, type_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, checkpointCmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, action_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeEnumSize(2, type_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(3, checkpointCmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto) obj;
boolean result = true;
result = result && (hasAction() == other.hasAction());
if (hasAction()) {
result = result && (getAction()
== other.getAction());
}
result = result && (hasType() == other.hasType());
if (hasType()) {
result = result &&
(getType() == other.getType());
}
result = result && (hasCheckpointCmd() == other.hasCheckpointCmd());
if (hasCheckpointCmd()) {
result = result && getCheckpointCmd()
.equals(other.getCheckpointCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasAction()) {
hash = (37 * hash) + ACTION_FIELD_NUMBER;
hash = (53 * hash) + getAction();
}
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getType());
}
if (hasCheckpointCmd()) {
hash = (37 * hash) + CHECKPOINTCMD_FIELD_NUMBER;
hash = (53 * hash) + getCheckpointCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.NamenodeCommandProto}
*
*
**
* Command sent from one namenode to another namenode.
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCheckpointCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
action_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type.NamenodeCommand;
bitField0_ = (bitField0_ & ~0x00000002);
if (checkpointCmdBuilder_ == null) {
checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance();
} else {
checkpointCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.action_ = action_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.type_ = type_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (checkpointCmdBuilder_ == null) {
result.checkpointCmd_ = checkpointCmd_;
} else {
result.checkpointCmd_ = checkpointCmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.getDefaultInstance()) return this;
if (other.hasAction()) {
setAction(other.getAction());
}
if (other.hasType()) {
setType(other.getType());
}
if (other.hasCheckpointCmd()) {
mergeCheckpointCmd(other.getCheckpointCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasAction()) {
return false;
}
if (!hasType()) {
return false;
}
if (hasCheckpointCmd()) {
if (!getCheckpointCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 action = 1;
private int action_ ;
/**
* required uint32 action = 1;
*/
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 action = 1;
*/
public int getAction() {
return action_;
}
/**
* required uint32 action = 1;
*/
public Builder setAction(int value) {
bitField0_ |= 0x00000001;
action_ = value;
onChanged();
return this;
}
/**
* required uint32 action = 1;
*/
public Builder clearAction() {
bitField0_ = (bitField0_ & ~0x00000001);
action_ = 0;
onChanged();
return this;
}
// required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type.NamenodeCommand;
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type getType() {
return type_;
}
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
type_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
public Builder clearType() {
bitField0_ = (bitField0_ & ~0x00000002);
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto.Type.NamenodeCommand;
onChanged();
return this;
}
// optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder> checkpointCmdBuilder_;
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public boolean hasCheckpointCmd() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto getCheckpointCmd() {
if (checkpointCmdBuilder_ == null) {
return checkpointCmd_;
} else {
return checkpointCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public Builder setCheckpointCmd(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto value) {
if (checkpointCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checkpointCmd_ = value;
onChanged();
} else {
checkpointCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public Builder setCheckpointCmd(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder builderForValue) {
if (checkpointCmdBuilder_ == null) {
checkpointCmd_ = builderForValue.build();
onChanged();
} else {
checkpointCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public Builder mergeCheckpointCmd(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto value) {
if (checkpointCmdBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
checkpointCmd_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance()) {
checkpointCmd_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.newBuilder(checkpointCmd_).mergeFrom(value).buildPartial();
} else {
checkpointCmd_ = value;
}
onChanged();
} else {
checkpointCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
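// Note on merge semantics (explanatory comment, not generated output): when no
// field builder is active, mergeCheckpointCmd() above field-merges the incoming
// value into any previously set checkpointCmd_ via a nested builder; if the
// field was never set, or still holds the default instance, the incoming
// message simply replaces it.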
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public Builder clearCheckpointCmd() {
if (checkpointCmdBuilder_ == null) {
checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.getDefaultInstance();
onChanged();
} else {
checkpointCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder getCheckpointCmdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getCheckpointCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder() {
if (checkpointCmdBuilder_ != null) {
return checkpointCmdBuilder_.getMessageOrBuilder();
} else {
return checkpointCmd_;
}
}
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder>
getCheckpointCmdFieldBuilder() {
if (checkpointCmdBuilder_ == null) {
checkpointCmdBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProtoOrBuilder>(
checkpointCmd_,
getParentForChildren(),
isClean());
checkpointCmd_ = null;
}
return checkpointCmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamenodeCommandProto)
}
static {
defaultInstance = new NamenodeCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.NamenodeCommandProto)
}
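// Usage sketch (illustrative comment, not part of the generated file): building
// a NamenodeCommandProto with the builder API above. The action code 1 is an
// arbitrary placeholder, not a documented command value.
//
//   HdfsServerProtos.NamenodeCommandProto cmd =
//       HdfsServerProtos.NamenodeCommandProto.newBuilder()
//           .setAction(1)
//           .setType(HdfsServerProtos.NamenodeCommandProto.Type.NamenodeCommand)
//           .build();
//   // build() throws an UninitializedMessageException if either required
//   // field (action, type) is unset; buildPartial() skips that check.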
public interface VersionRequestProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.VersionRequestProto}
*
*
**
* void request
*
*/
public static final class VersionRequestProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements VersionRequestProtoOrBuilder {
// Use VersionRequestProto.newBuilder() to construct.
private VersionRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private VersionRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final VersionRequestProto defaultInstance;
public static VersionRequestProto getDefaultInstance() {
return defaultInstance;
}
public VersionRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private VersionRequestProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionRequestProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<VersionRequestProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<VersionRequestProto>() {
public VersionRequestProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new VersionRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<VersionRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.VersionRequestProto}
*
*
**
* void request
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionRequestProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.VersionRequestProto)
}
static {
defaultInstance = new VersionRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.VersionRequestProto)
}
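// Round-trip sketch (illustrative comment): VersionRequestProto declares no
// fields, so serializing it yields an empty byte array unless unknown fields
// are present.
//
//   byte[] bytes =
//       HdfsServerProtos.VersionRequestProto.getDefaultInstance().toByteArray();
//   // bytes.length == 0 for the default instance
//   HdfsServerProtos.VersionRequestProto req =
//       HdfsServerProtos.VersionRequestProto.parseFrom(bytes);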
public interface VersionResponseProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.NamespaceInfoProto info = 1;
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
boolean hasInfo();
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getInfo();
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.VersionResponseProto}
*
*
**
* Version response from namenode.
*
*/
public static final class VersionResponseProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements VersionResponseProtoOrBuilder {
// Use VersionResponseProto.newBuilder() to construct.
private VersionResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private VersionResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final VersionResponseProto defaultInstance;
public static VersionResponseProto getDefaultInstance() {
return defaultInstance;
}
public VersionResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private VersionResponseProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = info_.toBuilder();
}
info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(info_);
info_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionResponseProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<VersionResponseProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<VersionResponseProto>() {
public VersionResponseProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new VersionResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<VersionResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.NamespaceInfoProto info = 1;
public static final int INFO_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto info_;
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public boolean hasInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getInfo() {
return info_;
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() {
return info_;
}
private void initFields() {
info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, info_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, info_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto) obj;
boolean result = true;
result = result && (hasInfo() == other.hasInfo());
if (hasInfo()) {
result = result && getInfo()
.equals(other.getInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasInfo()) {
hash = (37 * hash) + INFO_FIELD_NUMBER;
hash = (53 * hash) + getInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.VersionResponseProto}
*
*
**
* Version response from namenode.
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionResponseProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (infoBuilder_ == null) {
info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance();
} else {
infoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_VersionResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (infoBuilder_ == null) {
result.info_ = info_;
} else {
result.info_ = infoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance()) return this;
if (other.hasInfo()) {
mergeInfo(other.getInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasInfo()) {
return false;
}
if (!getInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.NamespaceInfoProto info = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder> infoBuilder_;
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public boolean hasInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto getInfo() {
if (infoBuilder_ == null) {
return info_;
} else {
return infoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto value) {
if (infoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
info_ = value;
onChanged();
} else {
infoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public Builder setInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder builderForValue) {
if (infoBuilder_ == null) {
info_ = builderForValue.build();
onChanged();
} else {
infoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto value) {
if (infoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
info_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance()) {
info_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.newBuilder(info_).mergeFrom(value).buildPartial();
} else {
info_ = value;
}
onChanged();
} else {
infoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public Builder clearInfo() {
if (infoBuilder_ == null) {
info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.getDefaultInstance();
onChanged();
} else {
infoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder getInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() {
if (infoBuilder_ != null) {
return infoBuilder_.getMessageOrBuilder();
} else {
return info_;
}
}
/**
* required .hadoop.hdfs.NamespaceInfoProto info = 1;
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder>
getInfoFieldBuilder() {
if (infoBuilder_ == null) {
infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProtoOrBuilder>(
info_,
getParentForChildren(),
isClean());
info_ = null;
}
return infoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.VersionResponseProto)
}
static {
defaultInstance = new VersionResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.VersionResponseProto)
}
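// Usage sketch (illustrative comment): the required 'info' field must be set
// before build() succeeds. NamespaceInfoProto is defined elsewhere in
// HdfsServerProtos; if it has required fields of its own, they too must be
// populated for isInitialized() to hold, so buildPartial() is used here to
// sidestep that check.
//
//   HdfsServerProtos.VersionResponseProto resp =
//       HdfsServerProtos.VersionResponseProto.newBuilder()
//           .setInfo(HdfsServerProtos.NamespaceInfoProto.getDefaultInstance())
//           .buildPartial();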
public interface StorageInfoProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required uint32 layoutVersion = 1;
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
boolean hasLayoutVersion();
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
int getLayoutVersion();
// required uint32 namespceID = 2;
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
boolean hasNamespceID();
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
int getNamespceID();
// required string clusterID = 3;
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
boolean hasClusterID();
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
java.lang.String getClusterID();
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getClusterIDBytes();
// required uint64 cTime = 4;
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
boolean hasCTime();
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
long getCTime();
}
/**
* Protobuf type {@code hadoop.hdfs.StorageInfoProto}
*
*
**
* Common node information shared by all the nodes in the cluster
*
*/
public static final class StorageInfoProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements StorageInfoProtoOrBuilder {
// Use StorageInfoProto.newBuilder() to construct.
private StorageInfoProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageInfoProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageInfoProto defaultInstance;
public static StorageInfoProto getDefaultInstance() {
return defaultInstance;
}
public StorageInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageInfoProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
layoutVersion_ = input.readUInt32();
break;
}
case 16: {
bitField0_ |= 0x00000002;
namespceID_ = input.readUInt32();
break;
}
case 26: {
bitField0_ |= 0x00000004;
clusterID_ = input.readBytes();
break;
}
case 32: {
bitField0_ |= 0x00000008;
cTime_ = input.readUInt64();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
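// Wire-format note (explanatory comment): each case value in the switch above
// is a protobuf tag, computed as (field_number << 3) | wire_type. Hence
// 8 = field 1 varint (layoutVersion), 16 = field 2 varint (namespceID),
// 26 = field 3 length-delimited (clusterID), and 32 = field 4 varint (cTime).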
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<StorageInfoProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<StorageInfoProto>() {
public StorageInfoProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new StorageInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<StorageInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 layoutVersion = 1;
public static final int LAYOUTVERSION_FIELD_NUMBER = 1;
private int layoutVersion_;
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public boolean hasLayoutVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public int getLayoutVersion() {
return layoutVersion_;
}
// required uint32 namespceID = 2;
public static final int NAMESPCEID_FIELD_NUMBER = 2;
private int namespceID_;
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public boolean hasNamespceID() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public int getNamespceID() {
return namespceID_;
}
// required string clusterID = 3;
public static final int CLUSTERID_FIELD_NUMBER = 3;
private java.lang.Object clusterID_;
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public boolean hasClusterID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public java.lang.String getClusterID() {
java.lang.Object ref = clusterID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clusterID_ = s;
}
return s;
}
}
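// Caching note (explanatory comment): clusterID_ holds either a String or a
// ByteString. getClusterID() above decodes a ByteString to UTF-8 on first use
// and caches the String only when the bytes are valid UTF-8;
// getClusterIDBytes() below performs the reverse conversion and caching.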
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getClusterIDBytes() {
java.lang.Object ref = clusterID_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clusterID_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// required uint64 cTime = 4;
public static final int CTIME_FIELD_NUMBER = 4;
private long cTime_;
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public boolean hasCTime() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public long getCTime() {
return cTime_;
}
private void initFields() {
layoutVersion_ = 0;
namespceID_ = 0;
clusterID_ = "";
cTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLayoutVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNamespceID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasClusterID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCTime()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, layoutVersion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, namespceID_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getClusterIDBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, cTime_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, layoutVersion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, namespceID_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getClusterIDBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, cTime_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto) obj;
boolean result = true;
result = result && (hasLayoutVersion() == other.hasLayoutVersion());
if (hasLayoutVersion()) {
result = result && (getLayoutVersion()
== other.getLayoutVersion());
}
result = result && (hasNamespceID() == other.hasNamespceID());
if (hasNamespceID()) {
result = result && (getNamespceID()
== other.getNamespceID());
}
result = result && (hasClusterID() == other.hasClusterID());
if (hasClusterID()) {
result = result && getClusterID()
.equals(other.getClusterID());
}
result = result && (hasCTime() == other.hasCTime());
if (hasCTime()) {
result = result && (getCTime()
== other.getCTime());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLayoutVersion()) {
hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER;
hash = (53 * hash) + getLayoutVersion();
}
if (hasNamespceID()) {
hash = (37 * hash) + NAMESPCEID_FIELD_NUMBER;
hash = (53 * hash) + getNamespceID();
}
if (hasClusterID()) {
hash = (37 * hash) + CLUSTERID_FIELD_NUMBER;
hash = (53 * hash) + getClusterID().hashCode();
}
if (hasCTime()) {
hash = (37 * hash) + CTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCTime());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageInfoProto}
*
*
**
* Common node information shared by all the nodes in the cluster
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
layoutVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
namespceID_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
clusterID_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
cTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.layoutVersion_ = layoutVersion_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.namespceID_ = namespceID_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.clusterID_ = clusterID_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.cTime_ = cTime_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) return this;
if (other.hasLayoutVersion()) {
setLayoutVersion(other.getLayoutVersion());
}
if (other.hasNamespceID()) {
setNamespceID(other.getNamespceID());
}
if (other.hasClusterID()) {
bitField0_ |= 0x00000004;
clusterID_ = other.clusterID_;
onChanged();
}
if (other.hasCTime()) {
setCTime(other.getCTime());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLayoutVersion()) {
return false;
}
if (!hasNamespceID()) {
return false;
}
if (!hasClusterID()) {
return false;
}
if (!hasCTime()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 layoutVersion = 1;
private int layoutVersion_ ;
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public boolean hasLayoutVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public int getLayoutVersion() {
return layoutVersion_;
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public Builder setLayoutVersion(int value) {
bitField0_ |= 0x00000001;
layoutVersion_ = value;
onChanged();
return this;
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public Builder clearLayoutVersion() {
bitField0_ = (bitField0_ & ~0x00000001);
layoutVersion_ = 0;
onChanged();
return this;
}
// required uint32 namespceID = 2;
private int namespceID_ ;
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public boolean hasNamespceID() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public int getNamespceID() {
return namespceID_;
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public Builder setNamespceID(int value) {
bitField0_ |= 0x00000002;
namespceID_ = value;
onChanged();
return this;
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public Builder clearNamespceID() {
bitField0_ = (bitField0_ & ~0x00000002);
namespceID_ = 0;
onChanged();
return this;
}
// required string clusterID = 3;
private java.lang.Object clusterID_ = "";
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public boolean hasClusterID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public java.lang.String getClusterID() {
java.lang.Object ref = clusterID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
clusterID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getClusterIDBytes() {
java.lang.Object ref = clusterID_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clusterID_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public Builder setClusterID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
clusterID_ = value;
onChanged();
return this;
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public Builder clearClusterID() {
bitField0_ = (bitField0_ & ~0x00000004);
clusterID_ = getDefaultInstance().getClusterID();
onChanged();
return this;
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public Builder setClusterIDBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
clusterID_ = value;
onChanged();
return this;
}
// required uint64 cTime = 4;
private long cTime_ ;
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public boolean hasCTime() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public long getCTime() {
return cTime_;
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public Builder setCTime(long value) {
bitField0_ |= 0x00000008;
cTime_ = value;
onChanged();
return this;
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public Builder clearCTime() {
bitField0_ = (bitField0_ & ~0x00000008);
cTime_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageInfoProto)
}
static {
defaultInstance = new StorageInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageInfoProto)
}
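// A minimal usage sketch for the StorageInfoProto builder API above
// (hypothetical values; kept as a comment so the generated listing stays
// compilable). build() throws UninitializedMessageException if any of the
// four required fields is unset:
//
//   StorageInfoProto info = StorageInfoProto.newBuilder()
//       .setLayoutVersion(63)          // required uint32
//       .setNamespceID(42)             // required uint32 (field name spelled as in the .proto)
//       .setClusterID("CID-example")   // required string
//       .setCTime(0L)                  // required uint64
//       .build();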
public interface NamenodeRegistrationProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required string rpcAddress = 1;
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
boolean hasRpcAddress();
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
java.lang.String getRpcAddress();
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getRpcAddressBytes();
// required string httpAddress = 2;
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
boolean hasHttpAddress();
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
java.lang.String getHttpAddress();
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getHttpAddressBytes();
// required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
boolean hasStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
// optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
boolean hasRole();
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole();
}
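// The NamenodeRegistrationProtoOrBuilder interface above is implemented by
// both the immutable message and its Builder, so read-only code can accept
// either form. A sketch (the helper name is ours, not part of the generated
// API):
//
//   static String describe(NamenodeRegistrationProtoOrBuilder reg) {
//     return reg.getRpcAddress() + " role=" + reg.getRole();
//   }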
/**
* Protobuf type {@code hadoop.hdfs.NamenodeRegistrationProto}
*
*
**
* Information sent by a namenode to identify itself to the primary namenode.
*
*/
public static final class NamenodeRegistrationProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements NamenodeRegistrationProtoOrBuilder {
// Use NamenodeRegistrationProto.newBuilder() to construct.
private NamenodeRegistrationProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private NamenodeRegistrationProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final NamenodeRegistrationProto defaultInstance;
public static NamenodeRegistrationProto getDefaultInstance() {
return defaultInstance;
}
public NamenodeRegistrationProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private NamenodeRegistrationProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
rpcAddress_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
httpAddress_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = storageInfo_.toBuilder();
}
storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storageInfo_);
storageInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
role_ = value;
}
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NamenodeRegistrationProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<NamenodeRegistrationProto>() {
public NamenodeRegistrationProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new NamenodeRegistrationProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NamenodeRegistrationProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto}
*/
public enum NamenodeRoleProto
implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum {
/**
* NAMENODE = 1;
*/
NAMENODE(0, 1),
/**
* BACKUP = 2;
*/
BACKUP(1, 2),
/**
* CHECKPOINT = 3;
*/
CHECKPOINT(2, 3),
;
/**
* NAMENODE = 1;
*/
public static final int NAMENODE_VALUE = 1;
/**
* BACKUP = 2;
*/
public static final int BACKUP_VALUE = 2;
/**
* CHECKPOINT = 3;
*/
public static final int CHECKPOINT_VALUE = 3;
public final int getNumber() { return value; }
public static NamenodeRoleProto valueOf(int value) {
switch (value) {
case 1: return NAMENODE;
case 2: return BACKUP;
case 3: return CHECKPOINT;
default: return null;
}
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<NamenodeRoleProto>
internalGetValueMap() {
return internalValueMap;
}
private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<NamenodeRoleProto>
internalValueMap =
new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<NamenodeRoleProto>() {
public NamenodeRoleProto findValueByNumber(int number) {
return NamenodeRoleProto.valueOf(number);
}
};
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDescriptor().getEnumTypes().get(0);
}
private static final NamenodeRoleProto[] VALUES = values();
public static NamenodeRoleProto valueOf(
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private NamenodeRoleProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto)
}
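// NamenodeRoleProto wire numbers start at 1, so the static valueOf(int)
// above returns null for any unrecognized number instead of throwing. A
// defensive-decoding sketch (rawValue is a hypothetical varint read off the
// wire):
//
//   NamenodeRoleProto role = NamenodeRoleProto.valueOf(rawValue);
//   if (role == null) {
//     role = NamenodeRoleProto.NAMENODE;  // fall back to the proto default
//   }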
private int bitField0_;
// required string rpcAddress = 1;
public static final int RPCADDRESS_FIELD_NUMBER = 1;
private java.lang.Object rpcAddress_;
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public boolean hasRpcAddress() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public java.lang.String getRpcAddress() {
java.lang.Object ref = rpcAddress_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
rpcAddress_ = s;
}
return s;
}
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getRpcAddressBytes() {
java.lang.Object ref = rpcAddress_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
rpcAddress_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// required string httpAddress = 2;
public static final int HTTPADDRESS_FIELD_NUMBER = 2;
private java.lang.Object httpAddress_;
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public boolean hasHttpAddress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public java.lang.String getHttpAddress() {
java.lang.Object ref = httpAddress_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
httpAddress_ = s;
}
return s;
}
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getHttpAddressBytes() {
java.lang.Object ref = httpAddress_;
if (ref instanceof java.lang.String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
httpAddress_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
public static final int STORAGEINFO_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
// optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
public static final int ROLE_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto role_;
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public boolean hasRole() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() {
return role_;
}
private void initFields() {
rpcAddress_ = "";
httpAddress_ = "";
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRpcAddress()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasHttpAddress()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getRpcAddressBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getHttpAddressBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, storageInfo_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, role_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getRpcAddressBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getHttpAddressBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeMessageSize(3, storageInfo_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeEnumSize(4, role_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto) obj;
boolean result = true;
result = result && (hasRpcAddress() == other.hasRpcAddress());
if (hasRpcAddress()) {
result = result && getRpcAddress()
.equals(other.getRpcAddress());
}
result = result && (hasHttpAddress() == other.hasHttpAddress());
if (hasHttpAddress()) {
result = result && getHttpAddress()
.equals(other.getHttpAddress());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result && (hasRole() == other.hasRole());
if (hasRole()) {
result = result &&
(getRole() == other.getRole());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRpcAddress()) {
hash = (37 * hash) + RPCADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getRpcAddress().hashCode();
}
if (hasHttpAddress()) {
hash = (37 * hash) + HTTPADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getHttpAddress().hashCode();
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
if (hasRole()) {
hash = (37 * hash) + ROLE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getRole());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
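// The parseFrom overloads above decode one complete message, while the
// parseDelimitedFrom overloads first read a varint length prefix as written
// by writeDelimitedTo, so the two families must be paired. A round-trip
// sketch over an in-memory stream (standard protobuf calls; the message
// instance is hypothetical):
//
//   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
//   registration.writeDelimitedTo(out);
//   NamenodeRegistrationProto back = NamenodeRegistrationProto.parseDelimitedFrom(
//       new java.io.ByteArrayInputStream(out.toByteArray()));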
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.NamenodeRegistrationProto}
*
*
**
* Information sent by a namenode to identify itself to the primary namenode.
*
*/
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
rpcAddress_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
httpAddress_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.rpcAddress_ = rpcAddress_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.httpAddress_ = httpAddress_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.role_ = role_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.getDefaultInstance()) return this;
if (other.hasRpcAddress()) {
bitField0_ |= 0x00000001;
rpcAddress_ = other.rpcAddress_;
onChanged();
}
if (other.hasHttpAddress()) {
bitField0_ |= 0x00000002;
httpAddress_ = other.httpAddress_;
onChanged();
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
if (other.hasRole()) {
setRole(other.getRole());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRpcAddress()) {
return false;
}
if (!hasHttpAddress()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string rpcAddress = 1;
private java.lang.Object rpcAddress_ = "";
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public boolean hasRpcAddress() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public java.lang.String getRpcAddress() {
java.lang.Object ref = rpcAddress_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
rpcAddress_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getRpcAddressBytes() {
java.lang.Object ref = rpcAddress_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
rpcAddress_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public Builder setRpcAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
rpcAddress_ = value;
onChanged();
return this;
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public Builder clearRpcAddress() {
bitField0_ = (bitField0_ & ~0x00000001);
rpcAddress_ = getDefaultInstance().getRpcAddress();
onChanged();
return this;
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public Builder setRpcAddressBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
rpcAddress_ = value;
onChanged();
return this;
}
// required string httpAddress = 2;
private java.lang.Object httpAddress_ = "";
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public boolean hasHttpAddress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public java.lang.String getHttpAddress() {
java.lang.Object ref = httpAddress_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
.toStringUtf8();
httpAddress_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
getHttpAddressBytes() {
java.lang.Object ref = httpAddress_;
if (ref instanceof String) {
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
httpAddress_ = b;
return b;
} else {
return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
}
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public Builder setHttpAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
httpAddress_ = value;
onChanged();
return this;
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public Builder clearHttpAddress() {
bitField0_ = (bitField0_ & ~0x00000002);
httpAddress_ = getDefaultInstance().getHttpAddress();
onChanged();
return this;
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public Builder setHttpAddressBytes(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
httpAddress_ = value;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
// optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public boolean hasRole() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() {
return role_;
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public Builder setRole(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
role_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public Builder clearRole() {
bitField0_ = (bitField0_ & ~0x00000008);
role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamenodeRegistrationProto)
}
static {
defaultInstance = new NamenodeRegistrationProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.NamenodeRegistrationProto)
}
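// Putting the pieces together: a sketch of building a registration with its
// required nested StorageInfoProto (addresses and IDs are hypothetical; role
// may be omitted and defaults to NAMENODE per the field declaration):
//
//   NamenodeRegistrationProto reg = NamenodeRegistrationProto.newBuilder()
//       .setRpcAddress("nn1.example.com:8020")
//       .setHttpAddress("nn1.example.com:9870")
//       .setStorageInfo(StorageInfoProto.newBuilder()
//           .setLayoutVersion(63)
//           .setNamespceID(42)
//           .setClusterID("CID-example")
//           .setCTime(0L))               // setStorageInfo(Builder) accepts the builder
//       .build();
//   byte[] wire = reg.toByteArray();
//   NamenodeRegistrationProto copy = NamenodeRegistrationProto.parseFrom(wire);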
public interface NNHAStatusHeartbeatProtoOrBuilder
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
boolean hasState();
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State getState();
// required uint64 txid = 2;
/**
* required uint64 txid = 2;
*/
boolean hasTxid();
/**
* required uint64 txid = 2;
*/
long getTxid();
}
/**
* Protobuf type {@code hadoop.hdfs.NNHAStatusHeartbeatProto}
*
*
**
* state - State the NN is in when returning response to the DN
* txid - Highest transaction ID this NN has seen
*
*/
public static final class NNHAStatusHeartbeatProto extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
implements NNHAStatusHeartbeatProtoOrBuilder {
// Use NNHAStatusHeartbeatProto.newBuilder() to construct.
private NNHAStatusHeartbeatProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private NNHAStatusHeartbeatProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final NNHAStatusHeartbeatProto defaultInstance;
public static NNHAStatusHeartbeatProto getDefaultInstance() {
return defaultInstance;
}
public NNHAStatusHeartbeatProto getDefaultInstanceForType() {
return defaultInstance;
}
private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private NNHAStatusHeartbeatProto(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State value = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
state_ = value;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
txid_ = input.readUInt64();
break;
}
}
}
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder.class);
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NNHAStatusHeartbeatProto> PARSER =
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<NNHAStatusHeartbeatProto>() {
public NNHAStatusHeartbeatProto parsePartialFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return new NNHAStatusHeartbeatProto(input, extensionRegistry);
}
};
@java.lang.Override
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<NNHAStatusHeartbeatProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.NNHAStatusHeartbeatProto.State}
*/
public enum State
implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum {
/**
* ACTIVE = 0;
*/
ACTIVE(0, 0),
/**
* STANDBY = 1;
*/
STANDBY(1, 1),
;
/**
* ACTIVE = 0;
*/
public static final int ACTIVE_VALUE = 0;
/**
* STANDBY = 1;
*/
public static final int STANDBY_VALUE = 1;
public final int getNumber() { return value; }
public static State valueOf(int value) {
switch (value) {
case 0: return ACTIVE;
case 1: return STANDBY;
default: return null;
}
}
public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<State>
internalGetValueMap() {
return internalValueMap;
}
private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<State>
internalValueMap =
new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap<State>() {
public State findValueByNumber(int number) {
return State.valueOf(number);
}
};
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = values();
public static State valueOf(
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private State(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.NNHAStatusHeartbeatProto.State)
}
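// When the message constructor above reads a State number it does not
// recognize (the case-8 branch), the raw varint is preserved via
// unknownFields.mergeVarintField rather than dropped, so re-serializing
// keeps the original value. A reader-side check, with a hypothetical
// heartbeat instance:
//
//   if (heartbeat.hasState() && heartbeat.getState() == State.ACTIVE) {
//     // this namenode is currently the active one
//   }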
private int bitField0_;
// required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
public static final int STATE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State state_;
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State getState() {
return state_;
}
// required uint64 txid = 2;
public static final int TXID_FIELD_NUMBER = 2;
private long txid_;
/**
* required uint64 txid = 2;
*/
public boolean hasTxid() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 txid = 2;
*/
public long getTxid() {
return txid_;
}
private void initFields() {
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
txid_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTxid()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, txid_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeEnumSize(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, txid_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto) obj;
boolean result = true;
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasTxid() == other.hasTxid());
if (hasTxid()) {
result = result && (getTxid()
== other.getTxid());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasTxid()) {
hash = (37 * hash) + TXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTxid());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(byte[] data)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(
byte[] data,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseDelimitedFrom(
java.io.InputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parseFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
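// Usage sketch (illustrative, not generated code): every overload above
// delegates to PARSER. A typical round trip, assuming a previously built
// message `status` (a hypothetical local variable):
//
//   byte[] bytes = status.toByteArray();
//   NNHAStatusHeartbeatProto copy = NNHAStatusHeartbeatProto.parseFrom(bytes);
//
// parseDelimitedFrom pairs with writeDelimitedTo(java.io.OutputStream) when
// several length-prefixed messages share a single stream.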
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
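// Copy-and-modify sketch (illustrative): toBuilder() seeds a Builder with this
// message's fields, so a derived message can keep state while replacing txid
// (`status` and `newTxid` are hypothetical names):
//
//   NNHAStatusHeartbeatProto next = status.toBuilder().setTxid(newTxid).build();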
/**
 * Protobuf type {@code hadoop.hdfs.NNHAStatusHeartbeatProto}
 *
 * <pre>
 * state - State the NN is in when returning response to the DN
 * txid - Highest transaction ID this NN has seen
 * </pre>
 */
public static final class Builder extends
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder {
public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_descriptor;
}
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
bitField0_ = (bitField0_ & ~0x00000001);
txid_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.txid_ = txid_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance()) return this;
if (other.hasState()) {
setState(other.getState());
}
if (other.hasTxid()) {
setTxid(other.getTxid());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasState()) {
return false;
}
if (!hasTxid()) {
return false;
}
return true;
}
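// Note (illustrative): build() enforces this check and throws via
// newUninitializedMessageException when a required field is unset, while
// buildPartial() above skips it and can return a message missing state or txid.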
public Builder mergeFrom(
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
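// Note (illustrative): if parsing fails mid-stream, the finally block still
// merges whatever was decoded into this builder before the exception
// propagates, so fields read ahead of the failure are not lost.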
private int bitField0_;
// required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State getState() {
return state_;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
state_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto.State state = 1;
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
onChanged();
return this;
}
// required uint64 txid = 2;
private long txid_ ;
/**
* required uint64 txid = 2;
*/
public boolean hasTxid() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 txid = 2;
*/
public long getTxid() {
return txid_;
}
/**
* required uint64 txid = 2;
*/
public Builder setTxid(long value) {
bitField0_ |= 0x00000002;
txid_ = value;
onChanged();
return this;
}
/**
* required uint64 txid = 2;
*/
public Builder clearTxid() {
bitField0_ = (bitField0_ & ~0x00000002);
txid_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.NNHAStatusHeartbeatProto)
}
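// Builder usage sketch (illustrative, not generated code): both fields are
// required, so build() throws if either setter is skipped:
//
//   NNHAStatusHeartbeatProto status =
//       NNHAStatusHeartbeatProto.newBuilder()
//           .setState(NNHAStatusHeartbeatProto.State.ACTIVE)
//           .setTxid(42L)
//           .build();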
static {
defaultInstance = new NNHAStatusHeartbeatProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.NNHAStatusHeartbeatProto)
}
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BlockKeyProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_VersionRequestProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_VersionResponseProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable;
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_descriptor;
private static
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_fieldAccessorTable;
public static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\020HdfsServer.proto\022\013hadoop.hdfs\032\nhdfs.pr" +
"oto\032\027HAServiceProtocol.proto\"D\n\rBlockKey" +
"Proto\022\r\n\005keyId\030\001 \002(\r\022\022\n\nexpiryDate\030\002 \002(\004" +
"\022\020\n\010keyBytes\030\003 \001(\014\"\304\001\n\026ExportedBlockKeys" +
"Proto\022\033\n\023isBlockTokenEnabled\030\001 \002(\010\022\031\n\021ke" +
"yUpdateInterval\030\002 \002(\004\022\025\n\rtokenLifeTime\030\003" +
" \002(\004\022.\n\ncurrentKey\030\004 \002(\0132\032.hadoop.hdfs.B" +
"lockKeyProto\022+\n\007allKeys\030\005 \003(\0132\032.hadoop.h" +
"dfs.BlockKeyProto\"\334\001\n\027BlockWithLocations" +
"Proto\022&\n\005block\030\001 \002(\0132\027.hadoop.hdfs.Block",
"Proto\022\025\n\rdatanodeUuids\030\002 \003(\t\022\024\n\014storageU" +
"uids\030\003 \003(\t\0223\n\014storageTypes\030\004 \003(\0162\035.hadoo" +
"p.hdfs.StorageTypeProto\022\017\n\007indices\030\005 \001(\014" +
"\022\024\n\014dataBlockNum\030\006 \001(\r\022\020\n\010cellSize\030\007 \001(\r" +
"\"P\n\030BlocksWithLocationsProto\0224\n\006blocks\030\001" +
" \003(\0132$.hadoop.hdfs.BlockWithLocationsPro" +
"to\"U\n\022RemoteEditLogProto\022\021\n\tstartTxId\030\001 " +
"\002(\004\022\017\n\007endTxId\030\002 \002(\004\022\033\n\014isInProgress\030\003 \001" +
"(\010:\005false\"c\n\032RemoteEditLogManifestProto\022" +
"-\n\004logs\030\001 \003(\0132\037.hadoop.hdfs.RemoteEditLo",
"gProto\022\026\n\016committedTxnId\030\002 \001(\004\"\361\001\n\022Names" +
"paceInfoProto\022\024\n\014buildVersion\030\001 \002(\t\022\016\n\006u" +
"nused\030\002 \002(\r\022\023\n\013blockPoolID\030\003 \002(\t\0222\n\013stor" +
"ageInfo\030\004 \002(\0132\035.hadoop.hdfs.StorageInfoP" +
"roto\022\027\n\017softwareVersion\030\005 \002(\t\022\027\n\014capabil" +
"ities\030\006 \001(\004:\0010\022:\n\005state\030\007 \001(\0162+.hadoop.h" +
"dfs.NNHAStatusHeartbeatProto.State\"\331\001\n\024R" +
"ecoveringBlockProto\022\023\n\013newGenStamp\030\001 \002(\004" +
"\022-\n\005block\030\002 \002(\0132\036.hadoop.hdfs.LocatedBlo" +
"ckProto\022.\n\rtruncateBlock\030\003 \001(\0132\027.hadoop.",
"hdfs.BlockProto\0227\n\010ecPolicy\030\004 \001(\0132%.hado" +
"op.hdfs.ErasureCodingPolicyProto\022\024\n\014bloc" +
"kIndices\030\005 \001(\014\"\235\001\n\030CheckpointSignaturePr" +
"oto\022\023\n\013blockPoolId\030\001 \002(\t\022 \n\030mostRecentCh" +
"eckpointTxId\030\002 \002(\004\022\026\n\016curSegmentTxId\030\003 \002" +
"(\004\0222\n\013storageInfo\030\004 \002(\0132\035.hadoop.hdfs.St" +
"orageInfoProto\"m\n\026CheckpointCommandProto" +
"\0228\n\tsignature\030\001 \002(\0132%.hadoop.hdfs.Checkp" +
"ointSignatureProto\022\031\n\021needToReturnImage\030" +
"\002 \002(\010\"\314\001\n\024NamenodeCommandProto\022\016\n\006action",
"\030\001 \002(\r\0224\n\004type\030\002 \002(\0162&.hadoop.hdfs.Namen" +
"odeCommandProto.Type\022:\n\rcheckpointCmd\030\003 " +
"\001(\0132#.hadoop.hdfs.CheckpointCommandProto" +
"\"2\n\004Type\022\023\n\017NamenodeCommand\020\000\022\025\n\021CheckPo" +
"intCommand\020\001\"\025\n\023VersionRequestProto\"E\n\024V" +
"ersionResponseProto\022-\n\004info\030\001 \002(\0132\037.hado" +
"op.hdfs.NamespaceInfoProto\"_\n\020StorageInf" +
"oProto\022\025\n\rlayoutVersion\030\001 \002(\r\022\022\n\nnamespc" +
"eID\030\002 \002(\r\022\021\n\tclusterID\030\003 \002(\t\022\r\n\005cTime\030\004 " +
"\002(\004\"\211\002\n\031NamenodeRegistrationProto\022\022\n\nrpc",
"Address\030\001 \002(\t\022\023\n\013httpAddress\030\002 \002(\t\0222\n\013st" +
"orageInfo\030\003 \002(\0132\035.hadoop.hdfs.StorageInf" +
"oProto\022P\n\004role\030\004 \001(\01628.hadoop.hdfs.Namen" +
"odeRegistrationProto.NamenodeRoleProto:\010" +
"NAMENODE\"=\n\021NamenodeRoleProto\022\014\n\010NAMENOD" +
"E\020\001\022\n\n\006BACKUP\020\002\022\016\n\nCHECKPOINT\020\003\"\206\001\n\030NNHA" +
"StatusHeartbeatProto\022:\n\005state\030\001 \002(\0162+.ha" +
"doop.hdfs.NNHAStatusHeartbeatProto.State" +
"\022\014\n\004txid\030\002 \002(\004\" \n\005State\022\n\n\006ACTIVE\020\000\022\013\n\007S" +
"TANDBY\020\001*L\n\021ReplicaStateProto\022\r\n\tFINALIZ",
"ED\020\000\022\007\n\003RBW\020\001\022\007\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r\n\tTEMPO" +
"RARY\020\004B<\n%org.apache.hadoop.hdfs.protoco" +
"l.protoB\020HdfsServerProtos\240\001\001"
};
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistry assignDescriptors(
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_hadoop_hdfs_BlockKeyProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_BlockKeyProto_descriptor,
new java.lang.String[] { "KeyId", "ExpiryDate", "KeyBytes", });
internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor,
new java.lang.String[] { "IsBlockTokenEnabled", "KeyUpdateInterval", "TokenLifeTime", "CurrentKey", "AllKeys", });
internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor,
new java.lang.String[] { "Block", "DatanodeUuids", "StorageUuids", "StorageTypes", "Indices", "DataBlockNum", "CellSize", });
internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor,
new java.lang.String[] { "Blocks", });
internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor,
new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor,
new java.lang.String[] { "Logs", "CommittedTxnId", });
internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor,
new java.lang.String[] { "BuildVersion", "Unused", "BlockPoolID", "StorageInfo", "SoftwareVersion", "Capabilities", "State", });
internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor,
new java.lang.String[] { "NewGenStamp", "Block", "TruncateBlock", "EcPolicy", "BlockIndices", });
internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor,
new java.lang.String[] { "BlockPoolId", "MostRecentCheckpointTxId", "CurSegmentTxId", "StorageInfo", });
internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor,
new java.lang.String[] { "Signature", "NeedToReturnImage", });
internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor,
new java.lang.String[] { "Action", "Type", "CheckpointCmd", });
internal_static_hadoop_hdfs_VersionRequestProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_VersionRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_VersionResponseProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_VersionResponseProto_descriptor,
new java.lang.String[] { "Info", });
internal_static_hadoop_hdfs_StorageInfoProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_StorageInfoProto_descriptor,
new java.lang.String[] { "LayoutVersion", "NamespceID", "ClusterID", "CTime", });
internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor,
new java.lang.String[] { "RpcAddress", "HttpAddress", "StorageInfo", "Role", });
internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_fieldAccessorTable = new
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_NNHAStatusHeartbeatProto_descriptor,
new java.lang.String[] { "State", "Txid", });
return null;
}
};
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.getDescriptor(),
}, assigner);
}
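// Reflection sketch (illustrative): the FileDescriptor assembled above exposes
// message metadata without going through the generated accessors; `d` and `f`
// are hypothetical locals:
//
//   io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor d =
//       org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos
//           .NNHAStatusHeartbeatProto.getDescriptor();
//   for (io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FieldDescriptor f
//       : d.getFields()) {
//     System.out.println(f.getNumber() + ": " + f.getName()); // 1: state, 2: txid
//   }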
// @@protoc_insertion_point(outer_class_scope)
}