
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: hdfs.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class HdfsProtos {
  private HdfsProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
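  // Usage note (an illustrative sketch, not part of the generated file):
  // hdfs.proto declares no extensions, so registerAllExtensions(...) is a
  // no-op kept for API uniformity. The conventional protobuf 2.x pattern
  // still applies when parsing:
  //
  //   com.google.protobuf.ExtensionRegistry registry =
  //       com.google.protobuf.ExtensionRegistry.newInstance();
  //   HdfsProtos.registerAllExtensions(registry);
  //   // ...then pass `registry` to any parseFrom(..., registry) overload.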
  /**
   * Protobuf enum {@code hadoop.hdfs.StorageTypeProto}
   *
    * <pre>
    **
    * Types of recognized storage media.
    * </pre>
*/ public enum StorageTypeProto implements com.google.protobuf.ProtocolMessageEnum { /** * DISK = 1; */ DISK(0, 1), /** * SSD = 2; */ SSD(1, 2), /** * ARCHIVE = 3; */ ARCHIVE(2, 3), /** * RAM_DISK = 4; */ RAM_DISK(3, 4), ; /** * DISK = 1; */ public static final int DISK_VALUE = 1; /** * SSD = 2; */ public static final int SSD_VALUE = 2; /** * ARCHIVE = 3; */ public static final int ARCHIVE_VALUE = 3; /** * RAM_DISK = 4; */ public static final int RAM_DISK_VALUE = 4; public final int getNumber() { return value; } public static StorageTypeProto valueOf(int value) { switch (value) { case 1: return DISK; case 2: return SSD; case 3: return ARCHIVE; case 4: return RAM_DISK; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public StorageTypeProto findValueByNumber(int number) { return StorageTypeProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0); } private static final StorageTypeProto[] VALUES = values(); public static StorageTypeProto valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private StorageTypeProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.StorageTypeProto) } /** * Protobuf enum {@code hadoop.hdfs.CipherSuiteProto} * *
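  // A minimal usage sketch for the enum above (the variable names are
  // illustrative; everything else is a generated member shown here):
  //
  //   StorageTypeProto type = StorageTypeProto.ARCHIVE;
  //   int wire = type.getNumber();                         // 3
  //   StorageTypeProto decoded = StorageTypeProto.valueOf(wire);
  //   assert decoded == StorageTypeProto.ARCHIVE;
  //   // valueOf(int) returns null for numbers outside the enum, so decode
  //   // paths that see untrusted data should null-check:
  //   assert StorageTypeProto.valueOf(42) == null;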
    * <pre>
    **
    * Cipher suite.
    * </pre>
*/ public enum CipherSuiteProto implements com.google.protobuf.ProtocolMessageEnum { /** * UNKNOWN = 1; */ UNKNOWN(0, 1), /** * AES_CTR_NOPADDING = 2; */ AES_CTR_NOPADDING(1, 2), ; /** * UNKNOWN = 1; */ public static final int UNKNOWN_VALUE = 1; /** * AES_CTR_NOPADDING = 2; */ public static final int AES_CTR_NOPADDING_VALUE = 2; public final int getNumber() { return value; } public static CipherSuiteProto valueOf(int value) { switch (value) { case 1: return UNKNOWN; case 2: return AES_CTR_NOPADDING; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public CipherSuiteProto findValueByNumber(int number) { return CipherSuiteProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(1); } private static final CipherSuiteProto[] VALUES = values(); public static CipherSuiteProto valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private CipherSuiteProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CipherSuiteProto) } /** * Protobuf enum {@code hadoop.hdfs.CryptoProtocolVersionProto} * *
    * <pre>
    **
    * Crypto protocol version used to access encrypted files.
    * </pre>
*/ public enum CryptoProtocolVersionProto implements com.google.protobuf.ProtocolMessageEnum { /** * UNKNOWN_PROTOCOL_VERSION = 1; */ UNKNOWN_PROTOCOL_VERSION(0, 1), /** * ENCRYPTION_ZONES = 2; */ ENCRYPTION_ZONES(1, 2), ; /** * UNKNOWN_PROTOCOL_VERSION = 1; */ public static final int UNKNOWN_PROTOCOL_VERSION_VALUE = 1; /** * ENCRYPTION_ZONES = 2; */ public static final int ENCRYPTION_ZONES_VALUE = 2; public final int getNumber() { return value; } public static CryptoProtocolVersionProto valueOf(int value) { switch (value) { case 1: return UNKNOWN_PROTOCOL_VERSION; case 2: return ENCRYPTION_ZONES; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public CryptoProtocolVersionProto findValueByNumber(int number) { return CryptoProtocolVersionProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(2); } private static final CryptoProtocolVersionProto[] VALUES = values(); public static CryptoProtocolVersionProto valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private CryptoProtocolVersionProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CryptoProtocolVersionProto) } /** * Protobuf enum {@code hadoop.hdfs.ChecksumTypeProto} * *
    * <pre>
    **
    * Checksum algorithms/types used in HDFS
    * Make sure this enum's integer values match enum values' id properties defined
    * in org.apache.hadoop.util.DataChecksum.Type
    * </pre>
*/ public enum ChecksumTypeProto implements com.google.protobuf.ProtocolMessageEnum { /** * CHECKSUM_NULL = 0; */ CHECKSUM_NULL(0, 0), /** * CHECKSUM_CRC32 = 1; */ CHECKSUM_CRC32(1, 1), /** * CHECKSUM_CRC32C = 2; */ CHECKSUM_CRC32C(2, 2), ; /** * CHECKSUM_NULL = 0; */ public static final int CHECKSUM_NULL_VALUE = 0; /** * CHECKSUM_CRC32 = 1; */ public static final int CHECKSUM_CRC32_VALUE = 1; /** * CHECKSUM_CRC32C = 2; */ public static final int CHECKSUM_CRC32C_VALUE = 2; public final int getNumber() { return value; } public static ChecksumTypeProto valueOf(int value) { switch (value) { case 0: return CHECKSUM_NULL; case 1: return CHECKSUM_CRC32; case 2: return CHECKSUM_CRC32C; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public ChecksumTypeProto findValueByNumber(int number) { return ChecksumTypeProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(3); } private static final ChecksumTypeProto[] VALUES = values(); public static ChecksumTypeProto valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private ChecksumTypeProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ChecksumTypeProto) } /** * Protobuf enum {@code hadoop.hdfs.ReplicaStateProto} * *
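  // A sketch of why the comment above pins these integer values: they are
  // meant to convert by number to org.apache.hadoop.util.DataChecksum.Type
  // ids. The DataChecksum lookup is assumed here for illustration; only
  // getNumber()/valueOf(int) are guaranteed by the generated code shown:
  //
  //   ChecksumTypeProto proto = ChecksumTypeProto.CHECKSUM_CRC32C;
  //   int id = proto.getNumber();                          // 2
  //   // hypothetically: DataChecksum.Type t = DataChecksum.Type.valueOf(id);
  //   assert ChecksumTypeProto.valueOf(id) == proto;       // round-trip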
    * <pre>
    **
    * State of a block replica at a datanode
    * </pre>
*/ public enum ReplicaStateProto implements com.google.protobuf.ProtocolMessageEnum { /** * FINALIZED = 0; * *
     * State of a replica when it is not modified
     * 
*/ FINALIZED(0, 0), /** * RBW = 1; * *
     * State of replica that is being written to
     * 
*/ RBW(1, 1), /** * RWR = 2; * *
     * State of replica that is waiting to be recovered
     * 
*/ RWR(2, 2), /** * RUR = 3; * *
     * State of replica that is under recovery
     * 
*/ RUR(3, 3), /** * TEMPORARY = 4; * *
     * State of replica that is created for replication
     * 
*/ TEMPORARY(4, 4), ; /** * FINALIZED = 0; * *
     * State of a replica when it is not modified
     * 
*/ public static final int FINALIZED_VALUE = 0; /** * RBW = 1; * *
     * State of replica that is being written to
     * 
*/ public static final int RBW_VALUE = 1; /** * RWR = 2; * *
     * State of replica that is waiting to be recovered
     * 
*/ public static final int RWR_VALUE = 2; /** * RUR = 3; * *
     * State of replica that is under recovery
     * 
*/ public static final int RUR_VALUE = 3; /** * TEMPORARY = 4; * *
     * State of replica that is created for replication
     * 
*/ public static final int TEMPORARY_VALUE = 4; public final int getNumber() { return value; } public static ReplicaStateProto valueOf(int value) { switch (value) { case 0: return FINALIZED; case 1: return RBW; case 2: return RWR; case 3: return RUR; case 4: return TEMPORARY; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public ReplicaStateProto findValueByNumber(int number) { return ReplicaStateProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(4); } private static final ReplicaStateProto[] VALUES = values(); public static ReplicaStateProto valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private ReplicaStateProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ReplicaStateProto) } public interface ExtendedBlockProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string poolId = 1; /** * required string poolId = 1; * *
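  // A reflective-lookup sketch that applies to every enum in this file,
  // shown with ReplicaStateProto (the loop variable names are illustrative):
  //
  //   com.google.protobuf.Descriptors.EnumDescriptor d =
  //       ReplicaStateProto.getDescriptor();
  //   for (com.google.protobuf.Descriptors.EnumValueDescriptor v : d.getValues()) {
  //     ReplicaStateProto state = ReplicaStateProto.valueOf(v);
  //     System.out.println(v.getName() + " = " + state.getNumber());
  //   }
  //   // prints FINALIZED = 0, RBW = 1, RWR = 2, RUR = 3, TEMPORARY = 4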
      * Block pool id - globally unique across clusters
     * 
*/ boolean hasPoolId(); /** * required string poolId = 1; * *
      * Block pool id - globally unique across clusters
     * 
*/ java.lang.String getPoolId(); /** * required string poolId = 1; * *
      * Block pool id - globally unique across clusters
     * 
*/ com.google.protobuf.ByteString getPoolIdBytes(); // required uint64 blockId = 2; /** * required uint64 blockId = 2; * *
     * the local id within a pool
     * 
*/ boolean hasBlockId(); /** * required uint64 blockId = 2; * *
     * the local id within a pool
     * 
*/ long getBlockId(); // required uint64 generationStamp = 3; /** * required uint64 generationStamp = 3; */ boolean hasGenerationStamp(); /** * required uint64 generationStamp = 3; */ long getGenerationStamp(); // optional uint64 numBytes = 4 [default = 0]; /** * optional uint64 numBytes = 4 [default = 0]; * *
     * len does not belong in ebid 
     * 
*/ boolean hasNumBytes(); /** * optional uint64 numBytes = 4 [default = 0]; * *
     * len does not belong in ebid 
     * 
*/ long getNumBytes(); } /** * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto} * *
    * <pre>
    **
    * Extended block identifies a block
    * </pre>
*/ public static final class ExtendedBlockProto extends com.google.protobuf.GeneratedMessage implements ExtendedBlockProtoOrBuilder { // Use ExtendedBlockProto.newBuilder() to construct. private ExtendedBlockProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ExtendedBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ExtendedBlockProto defaultInstance; public static ExtendedBlockProto getDefaultInstance() { return defaultInstance; } public ExtendedBlockProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ExtendedBlockProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; poolId_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; blockId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; generationStamp_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; numBytes_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public ExtendedBlockProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ExtendedBlockProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string poolId = 1; public static final int POOLID_FIELD_NUMBER = 1; private java.lang.Object poolId_; /** * required string poolId = 1; * *
      * Block pool id - globally unique across clusters
     * 
*/ public boolean hasPoolId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string poolId = 1; * *
      * Block pool id - globally unique across clusters
     * 
*/ public java.lang.String getPoolId() { java.lang.Object ref = poolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolId_ = s; } return s; } } /** * required string poolId = 1; * *
      * Block pool id - globally unique across clusters
     * 
*/ public com.google.protobuf.ByteString getPoolIdBytes() { java.lang.Object ref = poolId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint64 blockId = 2; public static final int BLOCKID_FIELD_NUMBER = 2; private long blockId_; /** * required uint64 blockId = 2; * *
     * the local id within a pool
     * 
*/ public boolean hasBlockId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 blockId = 2; * *
     * the local id within a pool
     * 
*/ public long getBlockId() { return blockId_; } // required uint64 generationStamp = 3; public static final int GENERATIONSTAMP_FIELD_NUMBER = 3; private long generationStamp_; /** * required uint64 generationStamp = 3; */ public boolean hasGenerationStamp() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 generationStamp = 3; */ public long getGenerationStamp() { return generationStamp_; } // optional uint64 numBytes = 4 [default = 0]; public static final int NUMBYTES_FIELD_NUMBER = 4; private long numBytes_; /** * optional uint64 numBytes = 4 [default = 0]; * *
     * len does not belong in ebid 
     * 
*/ public boolean hasNumBytes() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 numBytes = 4 [default = 0]; * *
     * len does not belong in ebid 
     * 
*/ public long getNumBytes() { return numBytes_; } private void initFields() { poolId_ = ""; blockId_ = 0L; generationStamp_ = 0L; numBytes_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasBlockId()) { memoizedIsInitialized = 0; return false; } if (!hasGenerationStamp()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPoolIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, blockId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, generationStamp_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, numBytes_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPoolIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, blockId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, generationStamp_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(4, numBytes_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj; boolean result = true; result = result && (hasPoolId() == other.hasPoolId()); if (hasPoolId()) { result = result && getPoolId() .equals(other.getPoolId()); } result = result && (hasBlockId() == other.hasBlockId()); if (hasBlockId()) { result = result && (getBlockId() == other.getBlockId()); } result = result && (hasGenerationStamp() == other.hasGenerationStamp()); if (hasGenerationStamp()) { result = result && (getGenerationStamp() == other.getGenerationStamp()); } result = result && (hasNumBytes() == other.hasNumBytes()); if (hasNumBytes()) { result = result && (getNumBytes() == other.getNumBytes()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPoolId()) { hash = (37 * hash) + POOLID_FIELD_NUMBER; hash = (53 * hash) + getPoolId().hashCode(); } if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlockId()); } if 
(hasGenerationStamp()) { hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER; hash = (53 * hash) + hashLong(getGenerationStamp()); } if (hasNumBytes()) { hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNumBytes()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto} * *
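  // A parsing sketch for the entry points above (the byte[] source and the
  // readFromWire() helper are hypothetical):
  //
  //   byte[] data = readFromWire();
  //   ExtendedBlockProto block = ExtendedBlockProto.parseFrom(data);
  //   // parseFrom validates the required fields (poolId, blockId,
  //   // generationStamp) and throws InvalidProtocolBufferException if any
  //   // is missing. parseDelimitedFrom, by contrast, first reads a varint
  //   // length prefix, pairing with writeDelimitedTo when several messages
  //   // share one InputStream.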
      * <pre>
      **
      * Extended block identifies a block
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); poolId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); blockId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); generationStamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); numBytes_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.poolId_ = poolId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.blockId_ = blockId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.generationStamp_ = generationStamp_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.numBytes_ = numBytes_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this; if (other.hasPoolId()) { bitField0_ |= 0x00000001; poolId_ = other.poolId_; onChanged(); } if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (other.hasGenerationStamp()) { setGenerationStamp(other.getGenerationStamp()); } if (other.hasNumBytes()) { setNumBytes(other.getNumBytes()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPoolId()) { return false; } if (!hasBlockId()) { return false; } if (!hasGenerationStamp()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string poolId = 1; private java.lang.Object poolId_ = ""; /** * required string poolId = 1; * *
        * Block pool id - globally unique across clusters
       * 
*/ public boolean hasPoolId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string poolId = 1; * *
        * Block pool id - globally unique across clusters
       * 
*/ public java.lang.String getPoolId() { java.lang.Object ref = poolId_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); poolId_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string poolId = 1; * *
        * Block pool id - globally unique across clusters
       * 
*/ public com.google.protobuf.ByteString getPoolIdBytes() { java.lang.Object ref = poolId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string poolId = 1; * *
        * Block pool id - globally unique across clusters
       * 
*/ public Builder setPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolId_ = value; onChanged(); return this; } /** * required string poolId = 1; * *
        * Block pool id - globally unique across clusters
       * 
*/ public Builder clearPoolId() { bitField0_ = (bitField0_ & ~0x00000001); poolId_ = getDefaultInstance().getPoolId(); onChanged(); return this; } /** * required string poolId = 1; * *
        * Block pool id - globally unique across clusters
       * 
*/ public Builder setPoolIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolId_ = value; onChanged(); return this; } // required uint64 blockId = 2; private long blockId_ ; /** * required uint64 blockId = 2; * *
       * the local id within a pool
       * 
*/ public boolean hasBlockId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 blockId = 2; * *
       * the local id within a pool
       * 
*/ public long getBlockId() { return blockId_; } /** * required uint64 blockId = 2; * *
       * the local id within a pool
       * 
*/ public Builder setBlockId(long value) { bitField0_ |= 0x00000002; blockId_ = value; onChanged(); return this; } /** * required uint64 blockId = 2; * *
       * the local id within a pool
       * 
*/ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000002); blockId_ = 0L; onChanged(); return this; } // required uint64 generationStamp = 3; private long generationStamp_ ; /** * required uint64 generationStamp = 3; */ public boolean hasGenerationStamp() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 generationStamp = 3; */ public long getGenerationStamp() { return generationStamp_; } /** * required uint64 generationStamp = 3; */ public Builder setGenerationStamp(long value) { bitField0_ |= 0x00000004; generationStamp_ = value; onChanged(); return this; } /** * required uint64 generationStamp = 3; */ public Builder clearGenerationStamp() { bitField0_ = (bitField0_ & ~0x00000004); generationStamp_ = 0L; onChanged(); return this; } // optional uint64 numBytes = 4 [default = 0]; private long numBytes_ ; /** * optional uint64 numBytes = 4 [default = 0]; * *
       * len does not belong in ebid 
       * 
*/ public boolean hasNumBytes() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 numBytes = 4 [default = 0]; * *
       * len does not belong in ebid 
       * 
*/ public long getNumBytes() { return numBytes_; } /** * optional uint64 numBytes = 4 [default = 0]; * *
       * len does not belong in ebid 
       * 
*/ public Builder setNumBytes(long value) { bitField0_ |= 0x00000008; numBytes_ = value; onChanged(); return this; } /** * optional uint64 numBytes = 4 [default = 0]; * *
       * len does not belong in ebid 
       * 
*/ public Builder clearNumBytes() { bitField0_ = (bitField0_ & ~0x00000008); numBytes_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExtendedBlockProto) } static { defaultInstance = new ExtendedBlockProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ExtendedBlockProto) } public interface DatanodeIDProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string ipAddr = 1; /** * required string ipAddr = 1; * *
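  // A build-and-serialize sketch using the Builder above (the pool id and
  // numeric values are made up for illustration):
  //
  //   ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
  //       .setPoolId("BP-1234-10.0.0.1-1400000000000")  // required
  //       .setBlockId(1073741825L)                      // required
  //       .setGenerationStamp(1001L)                    // required
  //       .setNumBytes(134217728L)                      // optional, default 0
  //       .build();   // build() throws if a required field is unset
  //   com.google.protobuf.ByteString bytes = block.toByteString();
  //   assert ExtendedBlockProto.parseFrom(bytes).equals(block);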
     * IP address
     * 
*/ boolean hasIpAddr(); /** * required string ipAddr = 1; * *
     * IP address
     * 
*/ java.lang.String getIpAddr(); /** * required string ipAddr = 1; * *
     * IP address
     * 
*/ com.google.protobuf.ByteString getIpAddrBytes(); // required string hostName = 2; /** * required string hostName = 2; * *
     * hostname
     * 
*/ boolean hasHostName(); /** * required string hostName = 2; * *
     * hostname
     * 
*/ java.lang.String getHostName(); /** * required string hostName = 2; * *
     * hostname
     * 
*/ com.google.protobuf.ByteString getHostNameBytes(); // required string datanodeUuid = 3; /** * required string datanodeUuid = 3; * *
     * UUID assigned to the Datanode. For
     * 
*/ boolean hasDatanodeUuid(); /** * required string datanodeUuid = 3; * *
     * UUID assigned to the Datanode. For
     * 
*/ java.lang.String getDatanodeUuid(); /** * required string datanodeUuid = 3; * *
     * UUID assigned to the Datanode. For
     * 
*/ com.google.protobuf.ByteString getDatanodeUuidBytes(); // required uint32 xferPort = 4; /** * required uint32 xferPort = 4; * *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
*/ boolean hasXferPort(); /** * required uint32 xferPort = 4; * *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
*/ int getXferPort(); // required uint32 infoPort = 5; /** * required uint32 infoPort = 5; * *
     * datanode http port
     * 
*/ boolean hasInfoPort(); /** * required uint32 infoPort = 5; * *
     * datanode http port
     * 
*/ int getInfoPort(); // required uint32 ipcPort = 6; /** * required uint32 ipcPort = 6; * *
     * ipc server port
     * 
*/ boolean hasIpcPort(); /** * required uint32 ipcPort = 6; * *
     * ipc server port
     * 
*/ int getIpcPort(); // optional uint32 infoSecurePort = 7 [default = 0]; /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
     * datanode https port
     * 
*/ boolean hasInfoSecurePort(); /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
     * datanode https port
     * 
*/ int getInfoSecurePort(); } /** * Protobuf type {@code hadoop.hdfs.DatanodeIDProto} * *
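  // A sketch of what the *OrBuilder interface buys callers: one signature
  // can read fields from either a built DatanodeIDProto or its Builder
  // (the endpoint() helper below is hypothetical):
  //
  //   static String endpoint(DatanodeIDProtoOrBuilder id) {
  //     return id.getIpAddr() + ":" + id.getXferPort();
  //   }
  //   // endpoint(proto) and endpoint(proto.toBuilder()) both compile.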
    * <pre>
    **
    * Identifies a Datanode
    * </pre>
*/ public static final class DatanodeIDProto extends com.google.protobuf.GeneratedMessage implements DatanodeIDProtoOrBuilder { // Use DatanodeIDProto.newBuilder() to construct. private DatanodeIDProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DatanodeIDProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DatanodeIDProto defaultInstance; public static DatanodeIDProto getDefaultInstance() { return defaultInstance; } public DatanodeIDProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeIDProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; ipAddr_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; hostName_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; datanodeUuid_ = input.readBytes(); break; } case 32: { bitField0_ |= 0x00000008; xferPort_ = input.readUInt32(); break; } case 40: { bitField0_ |= 0x00000010; infoPort_ = input.readUInt32(); break; } case 48: { bitField0_ |= 0x00000020; ipcPort_ = input.readUInt32(); break; } case 56: { bitField0_ |= 0x00000040; infoSecurePort_ = input.readUInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DatanodeIDProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DatanodeIDProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string ipAddr = 1; public static final int IPADDR_FIELD_NUMBER = 1; private java.lang.Object ipAddr_; /** * required string ipAddr = 1; * *
     * IP address
     * 
*/ public boolean hasIpAddr() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string ipAddr = 1; * *
     * IP address
     * 
*/ public java.lang.String getIpAddr() { java.lang.Object ref = ipAddr_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ipAddr_ = s; } return s; } } /** * required string ipAddr = 1; * *
     * IP address
     * 
*/ public com.google.protobuf.ByteString getIpAddrBytes() { java.lang.Object ref = ipAddr_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ipAddr_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string hostName = 2; public static final int HOSTNAME_FIELD_NUMBER = 2; private java.lang.Object hostName_; /** * required string hostName = 2; * *
     * hostname
     * 
*/ public boolean hasHostName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string hostName = 2; * *
     * hostname
     * 
*/ public java.lang.String getHostName() { java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { hostName_ = s; } return s; } } /** * required string hostName = 2; * *
     * hostname
     * 
*/ public com.google.protobuf.ByteString getHostNameBytes() { java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); hostName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string datanodeUuid = 3; public static final int DATANODEUUID_FIELD_NUMBER = 3; private java.lang.Object datanodeUuid_; /** * required string datanodeUuid = 3; * *
     * UUID assigned to the Datanode. For
     * 
*/ public boolean hasDatanodeUuid() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string datanodeUuid = 3; * *
     * UUID assigned to the Datanode. For
     * 
*/ public java.lang.String getDatanodeUuid() { java.lang.Object ref = datanodeUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { datanodeUuid_ = s; } return s; } } /** * required string datanodeUuid = 3; * *
     * UUID assigned to the Datanode. For
     * 
*/ public com.google.protobuf.ByteString getDatanodeUuidBytes() { java.lang.Object ref = datanodeUuid_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); datanodeUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint32 xferPort = 4; public static final int XFERPORT_FIELD_NUMBER = 4; private int xferPort_; /** * required uint32 xferPort = 4; * *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
*/ public boolean hasXferPort() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint32 xferPort = 4; * *
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * 
*/ public int getXferPort() { return xferPort_; } // required uint32 infoPort = 5; public static final int INFOPORT_FIELD_NUMBER = 5; private int infoPort_; /** * required uint32 infoPort = 5; * *
     * datanode http port
     * 
*/ public boolean hasInfoPort() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint32 infoPort = 5; * *
     * datanode http port
     * 
*/ public int getInfoPort() { return infoPort_; } // required uint32 ipcPort = 6; public static final int IPCPORT_FIELD_NUMBER = 6; private int ipcPort_; /** * required uint32 ipcPort = 6; * *
     * ipc server port
     * 
*/ public boolean hasIpcPort() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint32 ipcPort = 6; * *
     * ipc server port
     * 
*/ public int getIpcPort() { return ipcPort_; } // optional uint32 infoSecurePort = 7 [default = 0]; public static final int INFOSECUREPORT_FIELD_NUMBER = 7; private int infoSecurePort_; /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
     * datanode https port
     * 
*/ public boolean hasInfoSecurePort() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
     * datanode https port
     * 
*/ public int getInfoSecurePort() { return infoSecurePort_; } private void initFields() { ipAddr_ = ""; hostName_ = ""; datanodeUuid_ = ""; xferPort_ = 0; infoPort_ = 0; ipcPort_ = 0; infoSecurePort_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasIpAddr()) { memoizedIsInitialized = 0; return false; } if (!hasHostName()) { memoizedIsInitialized = 0; return false; } if (!hasDatanodeUuid()) { memoizedIsInitialized = 0; return false; } if (!hasXferPort()) { memoizedIsInitialized = 0; return false; } if (!hasInfoPort()) { memoizedIsInitialized = 0; return false; } if (!hasIpcPort()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getIpAddrBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getHostNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getDatanodeUuidBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt32(4, xferPort_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt32(5, infoPort_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt32(6, ipcPort_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt32(7, infoSecurePort_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getIpAddrBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getHostNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getDatanodeUuidBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(4, xferPort_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(5, infoPort_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(6, ipcPort_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(7, infoSecurePort_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj; boolean result = true; result = result && (hasIpAddr() == other.hasIpAddr()); if (hasIpAddr()) { result = result && getIpAddr() .equals(other.getIpAddr()); } result = result && (hasHostName() == other.hasHostName()); 
if (hasHostName()) { result = result && getHostName() .equals(other.getHostName()); } result = result && (hasDatanodeUuid() == other.hasDatanodeUuid()); if (hasDatanodeUuid()) { result = result && getDatanodeUuid() .equals(other.getDatanodeUuid()); } result = result && (hasXferPort() == other.hasXferPort()); if (hasXferPort()) { result = result && (getXferPort() == other.getXferPort()); } result = result && (hasInfoPort() == other.hasInfoPort()); if (hasInfoPort()) { result = result && (getInfoPort() == other.getInfoPort()); } result = result && (hasIpcPort() == other.hasIpcPort()); if (hasIpcPort()) { result = result && (getIpcPort() == other.getIpcPort()); } result = result && (hasInfoSecurePort() == other.hasInfoSecurePort()); if (hasInfoSecurePort()) { result = result && (getInfoSecurePort() == other.getInfoSecurePort()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasIpAddr()) { hash = (37 * hash) + IPADDR_FIELD_NUMBER; hash = (53 * hash) + getIpAddr().hashCode(); } if (hasHostName()) { hash = (37 * hash) + HOSTNAME_FIELD_NUMBER; hash = (53 * hash) + getHostName().hashCode(); } if (hasDatanodeUuid()) { hash = (37 * hash) + DATANODEUUID_FIELD_NUMBER; hash = (53 * hash) + getDatanodeUuid().hashCode(); } if (hasXferPort()) { hash = (37 * hash) + XFERPORT_FIELD_NUMBER; hash = (53 * hash) + getXferPort(); } if (hasInfoPort()) { hash = (37 * hash) + INFOPORT_FIELD_NUMBER; hash = (53 * hash) + getInfoPort(); } if (hasIpcPort()) { hash = (37 * hash) + IPCPORT_FIELD_NUMBER; hash = (53 * hash) + getIpcPort(); } if (hasInfoSecurePort()) { hash = (37 * hash) + INFOSECUREPORT_FIELD_NUMBER; hash = (53 * hash) + getInfoSecurePort(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream 
input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DatanodeIDProto} * *
      * <pre>
      **
      * Identifies a Datanode
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); ipAddr_ = ""; bitField0_ = (bitField0_ & ~0x00000001); hostName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); datanodeUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000004); xferPort_ = 0; bitField0_ = (bitField0_ & ~0x00000008); infoPort_ = 0; bitField0_ = (bitField0_ & ~0x00000010); ipcPort_ = 0; bitField0_ = (bitField0_ & ~0x00000020); infoSecurePort_ = 0; bitField0_ = (bitField0_ & ~0x00000040); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.ipAddr_ = ipAddr_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.hostName_ = hostName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.datanodeUuid_ = datanodeUuid_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.xferPort_ = xferPort_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.infoPort_ = infoPort_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.ipcPort_ = ipcPort_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.infoSecurePort_ = infoSecurePort_; 
result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this; if (other.hasIpAddr()) { bitField0_ |= 0x00000001; ipAddr_ = other.ipAddr_; onChanged(); } if (other.hasHostName()) { bitField0_ |= 0x00000002; hostName_ = other.hostName_; onChanged(); } if (other.hasDatanodeUuid()) { bitField0_ |= 0x00000004; datanodeUuid_ = other.datanodeUuid_; onChanged(); } if (other.hasXferPort()) { setXferPort(other.getXferPort()); } if (other.hasInfoPort()) { setInfoPort(other.getInfoPort()); } if (other.hasIpcPort()) { setIpcPort(other.getIpcPort()); } if (other.hasInfoSecurePort()) { setInfoSecurePort(other.getInfoSecurePort()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasIpAddr()) { return false; } if (!hasHostName()) { return false; } if (!hasDatanodeUuid()) { return false; } if (!hasXferPort()) { return false; } if (!hasInfoPort()) { return false; } if (!hasIpcPort()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string ipAddr = 1; private java.lang.Object ipAddr_ = ""; /** * required string ipAddr = 1; * *
       * IP address
       * 
*/ public boolean hasIpAddr() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string ipAddr = 1; * *
       * IP address
       * 
*/ public java.lang.String getIpAddr() { java.lang.Object ref = ipAddr_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); ipAddr_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string ipAddr = 1; * *
       * IP address
       * 
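       *
       * <p>Illustrative note, not generated text: the Bytes accessor views the same
       * field as raw UTF-8, so for a valid UTF-8 value both accessors agree
       * ({@code builder} is a made-up variable):
       * <pre>{@code
       * com.google.protobuf.ByteString raw = builder.getIpAddrBytes();
       * String s = raw.toStringUtf8();       // equals builder.getIpAddr()
       * }</pre>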
*/ public com.google.protobuf.ByteString getIpAddrBytes() { java.lang.Object ref = ipAddr_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ipAddr_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string ipAddr = 1; * *
       * IP address
       * 
*/ public Builder setIpAddr( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; ipAddr_ = value; onChanged(); return this; } /** * required string ipAddr = 1; * *
       * IP address
       * 
*/ public Builder clearIpAddr() { bitField0_ = (bitField0_ & ~0x00000001); ipAddr_ = getDefaultInstance().getIpAddr(); onChanged(); return this; } /** * required string ipAddr = 1; * *
       * IP address
       * 
*/ public Builder setIpAddrBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; ipAddr_ = value; onChanged(); return this; } // required string hostName = 2; private java.lang.Object hostName_ = ""; /** * required string hostName = 2; * *
       * hostname
       * 
*/ public boolean hasHostName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string hostName = 2; * *
       * hostname
       * 
*/ public java.lang.String getHostName() { java.lang.Object ref = hostName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); hostName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string hostName = 2; * *
       * hostname
       * 
*/ public com.google.protobuf.ByteString getHostNameBytes() { java.lang.Object ref = hostName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); hostName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string hostName = 2; * *
       * hostname
       * 
*/ public Builder setHostName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; hostName_ = value; onChanged(); return this; } /** * required string hostName = 2; * *
       * hostname
       * 
*/ public Builder clearHostName() { bitField0_ = (bitField0_ & ~0x00000002); hostName_ = getDefaultInstance().getHostName(); onChanged(); return this; } /** * required string hostName = 2; * *
       * hostname
       * 
*/ public Builder setHostNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; hostName_ = value; onChanged(); return this; } // required string datanodeUuid = 3; private java.lang.Object datanodeUuid_ = ""; /** * required string datanodeUuid = 3; * *
       * UUID assigned to the Datanode. For
       * 
*/ public boolean hasDatanodeUuid() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string datanodeUuid = 3; * *
       * UUID assigned to the Datanode. For
       * 
*/ public java.lang.String getDatanodeUuid() { java.lang.Object ref = datanodeUuid_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); datanodeUuid_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string datanodeUuid = 3; * *
       * UUID assigned to the Datanode. For
       * 
*/ public com.google.protobuf.ByteString getDatanodeUuidBytes() { java.lang.Object ref = datanodeUuid_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); datanodeUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string datanodeUuid = 3; * *
       * UUID assigned to the Datanode. For
       * 
*/ public Builder setDatanodeUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; datanodeUuid_ = value; onChanged(); return this; } /** * required string datanodeUuid = 3; * *
       * UUID assigned to the Datanode. For
       * 
*/ public Builder clearDatanodeUuid() { bitField0_ = (bitField0_ & ~0x00000004); datanodeUuid_ = getDefaultInstance().getDatanodeUuid(); onChanged(); return this; } /** * required string datanodeUuid = 3; * *
       * UUID assigned to the Datanode. For
       * 
*/ public Builder setDatanodeUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; datanodeUuid_ = value; onChanged(); return this; } // required uint32 xferPort = 4; private int xferPort_ ; /** * required uint32 xferPort = 4; * *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
*/ public boolean hasXferPort() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint32 xferPort = 4; * *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
*/ public int getXferPort() { return xferPort_; } /** * required uint32 xferPort = 4; * *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
*/ public Builder setXferPort(int value) { bitField0_ |= 0x00000008; xferPort_ = value; onChanged(); return this; } /** * required uint32 xferPort = 4; * *
       * upgraded clusters this is the same
       * as the original StorageID of the
       * Datanode.
       * 
*/ public Builder clearXferPort() { bitField0_ = (bitField0_ & ~0x00000008); xferPort_ = 0; onChanged(); return this; } // required uint32 infoPort = 5; private int infoPort_ ; /** * required uint32 infoPort = 5; * *
       * datanode http port
       * 
*/ public boolean hasInfoPort() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint32 infoPort = 5; * *
       * datanode http port
       * 
*/ public int getInfoPort() { return infoPort_; } /** * required uint32 infoPort = 5; * *
       * datanode http port
       * 
*/ public Builder setInfoPort(int value) { bitField0_ |= 0x00000010; infoPort_ = value; onChanged(); return this; } /** * required uint32 infoPort = 5; * *
       * datanode http port
       * 
*/ public Builder clearInfoPort() { bitField0_ = (bitField0_ & ~0x00000010); infoPort_ = 0; onChanged(); return this; } // required uint32 ipcPort = 6; private int ipcPort_ ; /** * required uint32 ipcPort = 6; * *
       * ipc server port
       * 
*/ public boolean hasIpcPort() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint32 ipcPort = 6; * *
       * ipc server port
       * 
*/ public int getIpcPort() { return ipcPort_; } /** * required uint32 ipcPort = 6; * *
       * ipc server port
       * 
*/ public Builder setIpcPort(int value) { bitField0_ |= 0x00000020; ipcPort_ = value; onChanged(); return this; } /** * required uint32 ipcPort = 6; * *
       * ipc server port
       * 
*/ public Builder clearIpcPort() { bitField0_ = (bitField0_ & ~0x00000020); ipcPort_ = 0; onChanged(); return this; } // optional uint32 infoSecurePort = 7 [default = 0]; private int infoSecurePort_ ; /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
       * datanode https port
       * 
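       *
       * <p>Illustrative note, not generated text: because infoSecurePort is optional
       * with default 0, the has-method is what distinguishes an explicit 0 from a
       * field that was never set ({@code b} is a made-up builder variable):
       * <pre>{@code
       * int httpsPort = b.hasInfoSecurePort() ? b.getInfoSecurePort() : -1;
       * }</pre>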
*/ public boolean hasInfoSecurePort() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
       * datanode https port
       * 
*/ public int getInfoSecurePort() { return infoSecurePort_; } /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
       * datanode https port
       * 
*/ public Builder setInfoSecurePort(int value) { bitField0_ |= 0x00000040; infoSecurePort_ = value; onChanged(); return this; } /** * optional uint32 infoSecurePort = 7 [default = 0]; * *
       * datanode https port
       * 
*/ public Builder clearInfoSecurePort() { bitField0_ = (bitField0_ & ~0x00000040); infoSecurePort_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeIDProto) } static { defaultInstance = new DatanodeIDProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeIDProto) } public interface DatanodeLocalInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string softwareVersion = 1; /** * required string softwareVersion = 1; */ boolean hasSoftwareVersion(); /** * required string softwareVersion = 1; */ java.lang.String getSoftwareVersion(); /** * required string softwareVersion = 1; */ com.google.protobuf.ByteString getSoftwareVersionBytes(); // required string configVersion = 2; /** * required string configVersion = 2; */ boolean hasConfigVersion(); /** * required string configVersion = 2; */ java.lang.String getConfigVersion(); /** * required string configVersion = 2; */ com.google.protobuf.ByteString getConfigVersionBytes(); // required uint64 uptime = 3; /** * required uint64 uptime = 3; */ boolean hasUptime(); /** * required uint64 uptime = 3; */ long getUptime(); } /** * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto} * *
   **
   * Datanode local information
   * 
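   *
   * <p>Illustrative sketch, not part of the generated code; values are made-up
   * examples. All three fields are required, and uptime is a uint64 carried as a
   * Java {@code long}:
   * <pre>{@code
   * HdfsProtos.DatanodeLocalInfoProto info = HdfsProtos.DatanodeLocalInfoProto.newBuilder()
   *     .setSoftwareVersion("2.7.3")    // example version string
   *     .setConfigVersion("config-1")   // example config version
   *     .setUptime(86400L)              // example uptime
   *     .build();
   * byte[] wire = info.toByteArray();   // standard generated serialization
   * }</pre>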
*/ public static final class DatanodeLocalInfoProto extends com.google.protobuf.GeneratedMessage implements DatanodeLocalInfoProtoOrBuilder { // Use DatanodeLocalInfoProto.newBuilder() to construct. private DatanodeLocalInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DatanodeLocalInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DatanodeLocalInfoProto defaultInstance; public static DatanodeLocalInfoProto getDefaultInstance() { return defaultInstance; } public DatanodeLocalInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeLocalInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; softwareVersion_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; configVersion_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; uptime_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DatanodeLocalInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DatanodeLocalInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string softwareVersion = 1; public static final int SOFTWAREVERSION_FIELD_NUMBER = 1; private java.lang.Object softwareVersion_; /** * required string softwareVersion = 1; */ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string softwareVersion = 1; */ public java.lang.String getSoftwareVersion() { java.lang.Object ref 
= softwareVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { softwareVersion_ = s; } return s; } } /** * required string softwareVersion = 1; */ public com.google.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string configVersion = 2; public static final int CONFIGVERSION_FIELD_NUMBER = 2; private java.lang.Object configVersion_; /** * required string configVersion = 2; */ public boolean hasConfigVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string configVersion = 2; */ public java.lang.String getConfigVersion() { java.lang.Object ref = configVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { configVersion_ = s; } return s; } } /** * required string configVersion = 2; */ public com.google.protobuf.ByteString getConfigVersionBytes() { java.lang.Object ref = configVersion_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); configVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint64 uptime = 3; public static final int UPTIME_FIELD_NUMBER = 3; private long uptime_; /** * required uint64 uptime = 3; */ public boolean hasUptime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 uptime = 3; */ public long getUptime() { return uptime_; } private void initFields() { softwareVersion_ = ""; configVersion_ = ""; uptime_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSoftwareVersion()) { memoizedIsInitialized = 0; return false; } if (!hasConfigVersion()) { memoizedIsInitialized = 0; return false; } if (!hasUptime()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSoftwareVersionBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getConfigVersionBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, uptime_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSoftwareVersionBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getConfigVersionBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, uptime_); } size += getUnknownFields().getSerializedSize(); 
memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) obj; boolean result = true; result = result && (hasSoftwareVersion() == other.hasSoftwareVersion()); if (hasSoftwareVersion()) { result = result && getSoftwareVersion() .equals(other.getSoftwareVersion()); } result = result && (hasConfigVersion() == other.hasConfigVersion()); if (hasConfigVersion()) { result = result && getConfigVersion() .equals(other.getConfigVersion()); } result = result && (hasUptime() == other.hasUptime()); if (hasUptime()) { result = result && (getUptime() == other.getUptime()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSoftwareVersion()) { hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER; hash = (53 * hash) + getSoftwareVersion().hashCode(); } if (hasConfigVersion()) { hash = (37 * hash) + CONFIGVERSION_FIELD_NUMBER; hash = (53 * hash) + getConfigVersion().hashCode(); } if (hasUptime()) { hash = (37 * hash) + UPTIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getUptime()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } 
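    // Illustrative sketch, not part of the generated code: the delimited parsers
    // pair with writeDelimitedTo(...), which length-prefixes each message so that
    // several can share one stream. The variables below are made-up examples:
    //
    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    //   first.writeDelimitedTo(out);    // varint size prefix, then the message bytes
    //   second.writeDelimitedTo(out);
    //   java.io.InputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
    //   DatanodeLocalInfoProto a = DatanodeLocalInfoProto.parseDelimitedFrom(in);
    //   DatanodeLocalInfoProto b = DatanodeLocalInfoProto.parseDelimitedFrom(in);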
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto} * *
     **
     * Datanode local information
     * 
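     *
     * <p>Illustrative note, not generated text: merging copies only the fields that
     * are set on the other message, so partial updates can be layered onto an
     * existing value ({@code older} and {@code newer} are made-up variables):
     * <pre>{@code
     * HdfsProtos.DatanodeLocalInfoProto merged = older.toBuilder()
     *     .mergeFrom(newer)   // fields set in newer overwrite; unset ones keep older's values
     *     .build();
     * }</pre>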
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); softwareVersion_ = ""; bitField0_ = (bitField0_ & ~0x00000001); configVersion_ = ""; bitField0_ = (bitField0_ & ~0x00000002); uptime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.softwareVersion_ = softwareVersion_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.configVersion_ = configVersion_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.uptime_ = uptime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) return 
this; if (other.hasSoftwareVersion()) { bitField0_ |= 0x00000001; softwareVersion_ = other.softwareVersion_; onChanged(); } if (other.hasConfigVersion()) { bitField0_ |= 0x00000002; configVersion_ = other.configVersion_; onChanged(); } if (other.hasUptime()) { setUptime(other.getUptime()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSoftwareVersion()) { return false; } if (!hasConfigVersion()) { return false; } if (!hasUptime()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string softwareVersion = 1; private java.lang.Object softwareVersion_ = ""; /** * required string softwareVersion = 1; */ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string softwareVersion = 1; */ public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); softwareVersion_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string softwareVersion = 1; */ public com.google.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string softwareVersion = 1; */ public Builder setSoftwareVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; softwareVersion_ = value; onChanged(); return this; } /** * required string softwareVersion = 1; */ public Builder clearSoftwareVersion() { bitField0_ = (bitField0_ & ~0x00000001); softwareVersion_ = getDefaultInstance().getSoftwareVersion(); onChanged(); return this; } /** * required string softwareVersion = 1; */ public Builder setSoftwareVersionBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; softwareVersion_ = value; onChanged(); return this; } // required string configVersion = 2; private java.lang.Object configVersion_ = ""; /** * required string configVersion = 2; */ public boolean hasConfigVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string configVersion = 2; */ public java.lang.String getConfigVersion() { java.lang.Object ref = configVersion_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); configVersion_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string configVersion = 2; */ public com.google.protobuf.ByteString getConfigVersionBytes() { java.lang.Object ref = configVersion_; if (ref instanceof String) { 
com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); configVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string configVersion = 2; */ public Builder setConfigVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; configVersion_ = value; onChanged(); return this; } /** * required string configVersion = 2; */ public Builder clearConfigVersion() { bitField0_ = (bitField0_ & ~0x00000002); configVersion_ = getDefaultInstance().getConfigVersion(); onChanged(); return this; } /** * required string configVersion = 2; */ public Builder setConfigVersionBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; configVersion_ = value; onChanged(); return this; } // required uint64 uptime = 3; private long uptime_ ; /** * required uint64 uptime = 3; */ public boolean hasUptime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 uptime = 3; */ public long getUptime() { return uptime_; } /** * required uint64 uptime = 3; */ public Builder setUptime(long value) { bitField0_ |= 0x00000004; uptime_ = value; onChanged(); return this; } /** * required uint64 uptime = 3; */ public Builder clearUptime() { bitField0_ = (bitField0_ & ~0x00000004); uptime_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeLocalInfoProto) } static { defaultInstance = new DatanodeLocalInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeLocalInfoProto) } public interface DatanodeInfosProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ java.util.List getDatanodesList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ int getDatanodesCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ java.util.List getDatanodesOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto} * *
   **
   * DatanodeInfo array
   * 
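   *
   * <p>Illustrative sketch, not part of the generated code: the repeated field is
   * exposed as an immutable list ({@code infos} is a made-up variable and
   * {@code process} a hypothetical callback):
   * <pre>{@code
   * for (HdfsProtos.DatanodeInfoProto dn : infos.getDatanodesList()) {
   *   process(dn);
   * }
   * int n = infos.getDatanodesCount();   // size without copying the list
   * }</pre>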
*/ public static final class DatanodeInfosProto extends com.google.protobuf.GeneratedMessage implements DatanodeInfosProtoOrBuilder { // Use DatanodeInfosProto.newBuilder() to construct. private DatanodeInfosProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DatanodeInfosProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DatanodeInfosProto defaultInstance; public static DatanodeInfosProto getDefaultInstance() { return defaultInstance; } public DatanodeInfosProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeInfosProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { datanodes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } datanodes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { datanodes_ = java.util.Collections.unmodifiableList(datanodes_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DatanodeInfosProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DatanodeInfosProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; public static final int DATANODES_FIELD_NUMBER = 1; private java.util.List datanodes_; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesList() { return datanodes_; } /** * repeated 
.hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesOrBuilderList() { return datanodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public int getDatanodesCount() { return datanodes_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { return datanodes_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index) { return datanodes_.get(index); } private void initFields() { datanodes_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getDatanodesCount(); i++) { if (!getDatanodes(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < datanodes_.size(); i++) { output.writeMessage(1, datanodes_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < datanodes_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, datanodes_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj; boolean result = true; result = result && getDatanodesList() .equals(other.getDatanodesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getDatanodesCount() > 0) { hash = (37 * hash) + DATANODES_FIELD_NUMBER; hash = (53 * hash) + getDatanodesList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto} * *
     **
     * DatanodeInfo array
     * 
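     *
     * <p>Illustrative sketch, not part of the generated code ({@code dn1},
     * {@code dn2}, {@code dn3} are made-up DatanodeInfoProto values):
     * <pre>{@code
     * HdfsProtos.DatanodeInfosProto infos = HdfsProtos.DatanodeInfosProto.newBuilder()
     *     .addDatanodes(dn1)                                    // append one element
     *     .addAllDatanodes(java.util.Arrays.asList(dn2, dn3))   // append several
     *     .build();
     * }</pre>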
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDatanodesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (datanodesBuilder_ == null) { datanodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { datanodesBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this); int from_bitField0_ = bitField0_; if (datanodesBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { datanodes_ = java.util.Collections.unmodifiableList(datanodes_); bitField0_ = (bitField0_ & ~0x00000001); } result.datanodes_ = datanodes_; } else { result.datanodes_ = datanodesBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this; if (datanodesBuilder_ == null) { if (!other.datanodes_.isEmpty()) { if (datanodes_.isEmpty()) { datanodes_ = other.datanodes_; bitField0_ = (bitField0_ & ~0x00000001); } else { 
ensureDatanodesIsMutable(); datanodes_.addAll(other.datanodes_); } onChanged(); } } else { if (!other.datanodes_.isEmpty()) { if (datanodesBuilder_.isEmpty()) { datanodesBuilder_.dispose(); datanodesBuilder_ = null; datanodes_ = other.datanodes_; bitField0_ = (bitField0_ & ~0x00000001); datanodesBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getDatanodesFieldBuilder() : null; } else { datanodesBuilder_.addAllMessages(other.datanodes_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getDatanodesCount(); i++) { if (!getDatanodes(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; private java.util.List datanodes_ = java.util.Collections.emptyList(); private void ensureDatanodesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { datanodes_ = new java.util.ArrayList(datanodes_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesList() { if (datanodesBuilder_ == null) { return java.util.Collections.unmodifiableList(datanodes_); } else { return datanodesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public int getDatanodesCount() { if (datanodesBuilder_ == null) { return datanodes_.size(); } else { return datanodesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { if (datanodesBuilder_ == null) { return datanodes_.get(index); } else { return datanodesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder setDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.set(index, value); onChanged(); } else { datanodesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder setDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.set(index, builderForValue.build()); onChanged(); } else { datanodesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder 
addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.add(value); onChanged(); } else { datanodesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodesIsMutable(); datanodes_.add(index, value); onChanged(); } else { datanodesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(builderForValue.build()); onChanged(); } else { datanodesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addDatanodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.add(index, builderForValue.build()); onChanged(); } else { datanodesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder addAllDatanodes( java.lang.Iterable values) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); super.addAll(values, datanodes_); onChanged(); } else { datanodesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder clearDatanodes() { if (datanodesBuilder_ == null) { datanodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { datanodesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public Builder removeDatanodes(int index) { if (datanodesBuilder_ == null) { ensureDatanodesIsMutable(); datanodes_.remove(index); onChanged(); } else { datanodesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder( int index) { return getDatanodesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( int index) { if (datanodesBuilder_ == null) { return datanodes_.get(index); } else { return datanodesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesOrBuilderList() { if (datanodesBuilder_ != null) { return datanodesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(datanodes_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() { return getDatanodesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ 
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder( int index) { return getDatanodesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1; */ public java.util.List getDatanodesBuilderList() { return getDatanodesFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getDatanodesFieldBuilder() { if (datanodesBuilder_ == null) { datanodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( datanodes_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); datanodes_ = null; } return datanodesBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfosProto) } static { defaultInstance = new DatanodeInfosProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfosProto) } public interface DatanodeInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.DatanodeIDProto id = 1; /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ boolean hasId(); /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId(); /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder(); // optional uint64 capacity = 2 [default = 0]; /** * optional uint64 capacity = 2 [default = 0]; */ boolean hasCapacity(); /** * optional uint64 capacity = 2 [default = 0]; */ long getCapacity(); // optional uint64 dfsUsed = 3 [default = 0]; /** * optional uint64 dfsUsed = 3 [default = 0]; */ boolean hasDfsUsed(); /** * optional uint64 dfsUsed = 3 [default = 0]; */ long getDfsUsed(); // optional uint64 remaining = 4 [default = 0]; /** * optional uint64 remaining = 4 [default = 0]; */ boolean hasRemaining(); /** * optional uint64 remaining = 4 [default = 0]; */ long getRemaining(); // optional uint64 blockPoolUsed = 5 [default = 0]; /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ boolean hasBlockPoolUsed(); /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ long getBlockPoolUsed(); // optional uint64 lastUpdate = 6 [default = 0]; /** * optional uint64 lastUpdate = 6 [default = 0]; */ boolean hasLastUpdate(); /** * optional uint64 lastUpdate = 6 [default = 0]; */ long getLastUpdate(); // optional uint32 xceiverCount = 7 [default = 0]; /** * optional uint32 xceiverCount = 7 [default = 0]; */ boolean hasXceiverCount(); /** * optional uint32 xceiverCount = 7 [default = 0]; */ int getXceiverCount(); // optional string location = 8; /** * optional string location = 8; */ boolean hasLocation(); /** * optional string location = 8; */ java.lang.String getLocation(); /** * optional string location = 8; */ com.google.protobuf.ByteString getLocationBytes(); // optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState 
adminState = 10 [default = NORMAL]; */ boolean hasAdminState(); /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState(); // optional uint64 cacheCapacity = 11 [default = 0]; /** * optional uint64 cacheCapacity = 11 [default = 0]; */ boolean hasCacheCapacity(); /** * optional uint64 cacheCapacity = 11 [default = 0]; */ long getCacheCapacity(); // optional uint64 cacheUsed = 12 [default = 0]; /** * optional uint64 cacheUsed = 12 [default = 0]; */ boolean hasCacheUsed(); /** * optional uint64 cacheUsed = 12 [default = 0]; */ long getCacheUsed(); // optional uint64 lastUpdateMonotonic = 13 [default = 0]; /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ boolean hasLastUpdateMonotonic(); /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ long getLastUpdateMonotonic(); } /** * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto} * *
   * <pre>
   **
   * The status of a Datanode
   * </pre>
*/ public static final class DatanodeInfoProto extends com.google.protobuf.GeneratedMessage implements DatanodeInfoProtoOrBuilder { // Use DatanodeInfoProto.newBuilder() to construct. private DatanodeInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DatanodeInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DatanodeInfoProto defaultInstance; public static DatanodeInfoProto getDefaultInstance() { return defaultInstance; } public DatanodeInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = id_.toBuilder(); } id_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(id_); id_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; capacity_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; dfsUsed_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; remaining_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; blockPoolUsed_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; lastUpdate_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; xceiverCount_ = input.readUInt32(); break; } case 66: { bitField0_ |= 0x00000080; location_ = input.readBytes(); break; } case 80: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(10, rawValue); } else { bitField0_ |= 0x00000100; adminState_ = value; } break; } case 88: { bitField0_ |= 0x00000200; cacheCapacity_ = input.readUInt64(); break; } case 96: { bitField0_ |= 0x00000400; cacheUsed_ = input.readUInt64(); break; } case 104: { bitField0_ |= 0x00000800; lastUpdateMonotonic_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } 
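  // Editor's sketch (not part of the generated file): building and
  // round-tripping a DatanodeInfoProto with the Builder defined below. The
  // DatanodeIDProto contents are elided because its fields are declared
  // elsewhere in this file; `id` must be fully initialized, since build()
  // throws for an uninitialized required field.
  //
  //   HdfsProtos.DatanodeIDProto id = ...;            // required sub-message
  //   HdfsProtos.DatanodeInfoProto info =
  //       HdfsProtos.DatanodeInfoProto.newBuilder()
  //           .setId(id)                              // required field 1
  //           .setCapacity(1L << 40)                  // optional uint64, default 0
  //           .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.DECOMMISSIONED)
  //           .build();
  //   byte[] wire = info.toByteArray();
  //   HdfsProtos.DatanodeInfoProto parsed =
  //       HdfsProtos.DatanodeInfoProto.parseFrom(wire);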
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DatanodeInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DatanodeInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } /** * Protobuf enum {@code hadoop.hdfs.DatanodeInfoProto.AdminState} */ public enum AdminState implements com.google.protobuf.ProtocolMessageEnum { /** * NORMAL = 0; */ NORMAL(0, 0), /** * DECOMMISSION_INPROGRESS = 1; */ DECOMMISSION_INPROGRESS(1, 1), /** * DECOMMISSIONED = 2; */ DECOMMISSIONED(2, 2), ; /** * NORMAL = 0; */ public static final int NORMAL_VALUE = 0; /** * DECOMMISSION_INPROGRESS = 1; */ public static final int DECOMMISSION_INPROGRESS_VALUE = 1; /** * DECOMMISSIONED = 2; */ public static final int DECOMMISSIONED_VALUE = 2; public final int getNumber() { return value; } public static AdminState valueOf(int value) { switch (value) { case 0: return NORMAL; case 1: return DECOMMISSION_INPROGRESS; case 2: return DECOMMISSIONED; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public AdminState findValueByNumber(int number) { return AdminState.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0); } private static final AdminState[] VALUES = values(); public static AdminState valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private AdminState(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeInfoProto.AdminState) } private int bitField0_; // required .hadoop.hdfs.DatanodeIDProto id = 1; public static final int ID_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_; /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { return id_; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { return id_; } // optional uint64 capacity = 2 [default = 0]; public static final int CAPACITY_FIELD_NUMBER = 2; private long capacity_; /** * optional uint64 capacity = 2 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 capacity = 2 [default = 0]; */ public long getCapacity() { return capacity_; } // optional uint64 dfsUsed = 3 [default = 0]; public static final int DFSUSED_FIELD_NUMBER = 3; private long dfsUsed_; /** * optional uint64 dfsUsed = 3 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } // optional uint64 remaining = 4 [default = 0]; public static final int REMAINING_FIELD_NUMBER = 4; private long remaining_; /** * optional uint64 remaining = 4 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 remaining = 4 [default = 0]; */ public long getRemaining() { return remaining_; } // optional uint64 blockPoolUsed = 5 [default = 0]; public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5; private long blockPoolUsed_; /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } // optional uint64 lastUpdate = 6 [default = 0]; public static final int LASTUPDATE_FIELD_NUMBER = 6; private long lastUpdate_; /** * optional uint64 lastUpdate = 6 [default = 0]; */ public boolean hasLastUpdate() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 lastUpdate = 6 [default = 0]; */ public long getLastUpdate() { return lastUpdate_; } // optional uint32 xceiverCount = 7 [default = 0]; public static final int XCEIVERCOUNT_FIELD_NUMBER = 7; private int xceiverCount_; /** * optional uint32 xceiverCount = 7 [default = 0]; */ public boolean hasXceiverCount() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public int getXceiverCount() { return xceiverCount_; } // optional string location = 8; public static final int LOCATION_FIELD_NUMBER = 8; private java.lang.Object location_; /** * optional string location = 8; */ public boolean hasLocation() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional string location = 8; */ public java.lang.String getLocation() { java.lang.Object ref = location_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { location_ = s; } return s; } } /** * optional string location = 8; */ public com.google.protobuf.ByteString getLocationBytes() { java.lang.Object ref = location_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); location_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; public static final int ADMINSTATE_FIELD_NUMBER = 10; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_; 
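  // Editor's sketch: the has*/get* presence idiom for the optional fields
  // declared above. Getters return the declared default when a field is
  // absent, so hasCapacity() is the only way to tell "unset" apart from an
  // explicit 0; adminState carries [default = NORMAL], making its getter safe
  // to call unconditionally. Variable names are illustrative.
  //
  //   long capacity = info.hasCapacity() ? info.getCapacity() : -1L;
  //   HdfsProtos.DatanodeInfoProto.AdminState state = info.getAdminState();
  //   boolean decommissioning =
  //       state == HdfsProtos.DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;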
/** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public boolean hasAdminState() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { return adminState_; } // optional uint64 cacheCapacity = 11 [default = 0]; public static final int CACHECAPACITY_FIELD_NUMBER = 11; private long cacheCapacity_; /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public boolean hasCacheCapacity() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public long getCacheCapacity() { return cacheCapacity_; } // optional uint64 cacheUsed = 12 [default = 0]; public static final int CACHEUSED_FIELD_NUMBER = 12; private long cacheUsed_; /** * optional uint64 cacheUsed = 12 [default = 0]; */ public boolean hasCacheUsed() { return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public long getCacheUsed() { return cacheUsed_; } // optional uint64 lastUpdateMonotonic = 13 [default = 0]; public static final int LASTUPDATEMONOTONIC_FIELD_NUMBER = 13; private long lastUpdateMonotonic_; /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public boolean hasLastUpdateMonotonic() { return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public long getLastUpdateMonotonic() { return lastUpdateMonotonic_; } private void initFields() { id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); capacity_ = 0L; dfsUsed_ = 0L; remaining_ = 0L; blockPoolUsed_ = 0L; lastUpdate_ = 0L; xceiverCount_ = 0; location_ = ""; adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; cacheCapacity_ = 0L; cacheUsed_ = 0L; lastUpdateMonotonic_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasId()) { memoizedIsInitialized = 0; return false; } if (!getId().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, id_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, capacity_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, dfsUsed_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, remaining_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, blockPoolUsed_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, lastUpdate_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt32(7, xceiverCount_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeBytes(8, getLocationBytes()); } if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeEnum(10, adminState_.getNumber()); } if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeUInt64(11, cacheCapacity_); } if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeUInt64(12, cacheUsed_); } if (((bitField0_ & 0x00000800) == 0x00000800)) { output.writeUInt64(13, 
lastUpdateMonotonic_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, id_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, capacity_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, dfsUsed_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(4, remaining_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(5, blockPoolUsed_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, lastUpdate_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(7, xceiverCount_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(8, getLocationBytes()); } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(10, adminState_.getNumber()); } if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(11, cacheCapacity_); } if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(12, cacheUsed_); } if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(13, lastUpdateMonotonic_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj; boolean result = true; result = result && (hasId() == other.hasId()); if (hasId()) { result = result && getId() .equals(other.getId()); } result = result && (hasCapacity() == other.hasCapacity()); if (hasCapacity()) { result = result && (getCapacity() == other.getCapacity()); } result = result && (hasDfsUsed() == other.hasDfsUsed()); if (hasDfsUsed()) { result = result && (getDfsUsed() == other.getDfsUsed()); } result = result && (hasRemaining() == other.hasRemaining()); if (hasRemaining()) { result = result && (getRemaining() == other.getRemaining()); } result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed()); if (hasBlockPoolUsed()) { result = result && (getBlockPoolUsed() == other.getBlockPoolUsed()); } result = result && (hasLastUpdate() == other.hasLastUpdate()); if (hasLastUpdate()) { result = result && (getLastUpdate() == other.getLastUpdate()); } result = result && (hasXceiverCount() == other.hasXceiverCount()); if (hasXceiverCount()) { result = result && (getXceiverCount() == other.getXceiverCount()); } result = result && (hasLocation() 
== other.hasLocation()); if (hasLocation()) { result = result && getLocation() .equals(other.getLocation()); } result = result && (hasAdminState() == other.hasAdminState()); if (hasAdminState()) { result = result && (getAdminState() == other.getAdminState()); } result = result && (hasCacheCapacity() == other.hasCacheCapacity()); if (hasCacheCapacity()) { result = result && (getCacheCapacity() == other.getCacheCapacity()); } result = result && (hasCacheUsed() == other.hasCacheUsed()); if (hasCacheUsed()) { result = result && (getCacheUsed() == other.getCacheUsed()); } result = result && (hasLastUpdateMonotonic() == other.hasLastUpdateMonotonic()); if (hasLastUpdateMonotonic()) { result = result && (getLastUpdateMonotonic() == other.getLastUpdateMonotonic()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + getId().hashCode(); } if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCapacity()); } if (hasDfsUsed()) { hash = (37 * hash) + DFSUSED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getDfsUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + hashLong(getRemaining()); } if (hasBlockPoolUsed()) { hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlockPoolUsed()); } if (hasLastUpdate()) { hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastUpdate()); } if (hasXceiverCount()) { hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER; hash = (53 * hash) + getXceiverCount(); } if (hasLocation()) { hash = (37 * hash) + LOCATION_FIELD_NUMBER; hash = (53 * hash) + getLocation().hashCode(); } if (hasAdminState()) { hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getAdminState()); } if (hasCacheCapacity()) { hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCacheCapacity()); } if (hasCacheUsed()) { hash = (37 * hash) + CACHEUSED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCacheUsed()); } if (hasLastUpdateMonotonic()) { hash = (37 * hash) + LASTUPDATEMONOTONIC_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastUpdateMonotonic()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto} * *
     * <pre>
     **
     * The status of a Datanode
     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getIdFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (idBuilder_ == null) { id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); } else { idBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); capacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); dfsUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); remaining_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); blockPoolUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); lastUpdate_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); xceiverCount_ = 0; bitField0_ = (bitField0_ & ~0x00000040); location_ = ""; bitField0_ = (bitField0_ & ~0x00000080); adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; bitField0_ = (bitField0_ & ~0x00000100); cacheCapacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000200); cacheUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); lastUpdateMonotonic_ = 0L; bitField0_ = (bitField0_ & ~0x00000800); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (idBuilder_ == null) { result.id_ = id_; } else { result.id_ = idBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; 
} result.capacity_ = capacity_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.dfsUsed_ = dfsUsed_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.remaining_ = remaining_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.blockPoolUsed_ = blockPoolUsed_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.lastUpdate_ = lastUpdate_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.xceiverCount_ = xceiverCount_; if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.location_ = location_; if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000100; } result.adminState_ = adminState_; if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000200; } result.cacheCapacity_ = cacheCapacity_; if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000400; } result.cacheUsed_ = cacheUsed_; if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000800; } result.lastUpdateMonotonic_ = lastUpdateMonotonic_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this; if (other.hasId()) { mergeId(other.getId()); } if (other.hasCapacity()) { setCapacity(other.getCapacity()); } if (other.hasDfsUsed()) { setDfsUsed(other.getDfsUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasBlockPoolUsed()) { setBlockPoolUsed(other.getBlockPoolUsed()); } if (other.hasLastUpdate()) { setLastUpdate(other.getLastUpdate()); } if (other.hasXceiverCount()) { setXceiverCount(other.getXceiverCount()); } if (other.hasLocation()) { bitField0_ |= 0x00000080; location_ = other.location_; onChanged(); } if (other.hasAdminState()) { setAdminState(other.getAdminState()); } if (other.hasCacheCapacity()) { setCacheCapacity(other.getCacheCapacity()); } if (other.hasCacheUsed()) { setCacheUsed(other.getCacheUsed()); } if (other.hasLastUpdateMonotonic()) { setLastUpdateMonotonic(other.getLastUpdateMonotonic()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasId()) { return false; } if (!getId().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.DatanodeIDProto id = 1; private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_; /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { if (idBuilder_ == null) { return id_; } else { return idBuilder_.getMessage(); } } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (idBuilder_ == null) { if (value == null) { throw new NullPointerException(); } id_ = value; onChanged(); } else { idBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder setId( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { if (idBuilder_ == null) { id_ = builderForValue.build(); onChanged(); } else { idBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (idBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(id_).mergeFrom(value).buildPartial(); } else { id_ = value; } onChanged(); } else { idBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public Builder clearId() { if (idBuilder_ == null) { id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); onChanged(); } else { idBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() { bitField0_ |= 0x00000001; onChanged(); return getIdFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { if (idBuilder_ != null) { return idBuilder_.getMessageOrBuilder(); } else { return id_; } } /** * required .hadoop.hdfs.DatanodeIDProto id = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> getIdFieldBuilder() { if (idBuilder_ == null) { idBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( id_, getParentForChildren(), isClean()); id_ = null; } return idBuilder_; } // optional uint64 capacity = 2 
[default = 0]; private long capacity_ ; /** * optional uint64 capacity = 2 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 capacity = 2 [default = 0]; */ public long getCapacity() { return capacity_; } /** * optional uint64 capacity = 2 [default = 0]; */ public Builder setCapacity(long value) { bitField0_ |= 0x00000002; capacity_ = value; onChanged(); return this; } /** * optional uint64 capacity = 2 [default = 0]; */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000002); capacity_ = 0L; onChanged(); return this; } // optional uint64 dfsUsed = 3 [default = 0]; private long dfsUsed_ ; /** * optional uint64 dfsUsed = 3 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public Builder setDfsUsed(long value) { bitField0_ |= 0x00000004; dfsUsed_ = value; onChanged(); return this; } /** * optional uint64 dfsUsed = 3 [default = 0]; */ public Builder clearDfsUsed() { bitField0_ = (bitField0_ & ~0x00000004); dfsUsed_ = 0L; onChanged(); return this; } // optional uint64 remaining = 4 [default = 0]; private long remaining_ ; /** * optional uint64 remaining = 4 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 remaining = 4 [default = 0]; */ public long getRemaining() { return remaining_; } /** * optional uint64 remaining = 4 [default = 0]; */ public Builder setRemaining(long value) { bitField0_ |= 0x00000008; remaining_ = value; onChanged(); return this; } /** * optional uint64 remaining = 4 [default = 0]; */ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000008); remaining_ = 0L; onChanged(); return this; } // optional uint64 blockPoolUsed = 5 [default = 0]; private long blockPoolUsed_ ; /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public Builder setBlockPoolUsed(long value) { bitField0_ |= 0x00000010; blockPoolUsed_ = value; onChanged(); return this; } /** * optional uint64 blockPoolUsed = 5 [default = 0]; */ public Builder clearBlockPoolUsed() { bitField0_ = (bitField0_ & ~0x00000010); blockPoolUsed_ = 0L; onChanged(); return this; } // optional uint64 lastUpdate = 6 [default = 0]; private long lastUpdate_ ; /** * optional uint64 lastUpdate = 6 [default = 0]; */ public boolean hasLastUpdate() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 lastUpdate = 6 [default = 0]; */ public long getLastUpdate() { return lastUpdate_; } /** * optional uint64 lastUpdate = 6 [default = 0]; */ public Builder setLastUpdate(long value) { bitField0_ |= 0x00000020; lastUpdate_ = value; onChanged(); return this; } /** * optional uint64 lastUpdate = 6 [default = 0]; */ public Builder clearLastUpdate() { bitField0_ = (bitField0_ & ~0x00000020); lastUpdate_ = 0L; onChanged(); return this; } // optional uint32 xceiverCount = 7 [default = 0]; private int xceiverCount_ ; /** * optional uint32 xceiverCount = 7 [default = 0]; */ public boolean hasXceiverCount() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint32 xceiverCount = 
7 [default = 0]; */ public int getXceiverCount() { return xceiverCount_; } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public Builder setXceiverCount(int value) { bitField0_ |= 0x00000040; xceiverCount_ = value; onChanged(); return this; } /** * optional uint32 xceiverCount = 7 [default = 0]; */ public Builder clearXceiverCount() { bitField0_ = (bitField0_ & ~0x00000040); xceiverCount_ = 0; onChanged(); return this; } // optional string location = 8; private java.lang.Object location_ = ""; /** * optional string location = 8; */ public boolean hasLocation() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional string location = 8; */ public java.lang.String getLocation() { java.lang.Object ref = location_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); location_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string location = 8; */ public com.google.protobuf.ByteString getLocationBytes() { java.lang.Object ref = location_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); location_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * optional string location = 8; */ public Builder setLocation( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; location_ = value; onChanged(); return this; } /** * optional string location = 8; */ public Builder clearLocation() { bitField0_ = (bitField0_ & ~0x00000080); location_ = getDefaultInstance().getLocation(); onChanged(); return this; } /** * optional string location = 8; */ public Builder setLocationBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; location_ = value; onChanged(); return this; } // optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public boolean hasAdminState() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { return adminState_; } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; adminState_ = value; onChanged(); return this; } /** * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL]; */ public Builder clearAdminState() { bitField0_ = (bitField0_ & ~0x00000100); adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; onChanged(); return this; } // optional uint64 cacheCapacity = 11 [default = 0]; private long cacheCapacity_ ; /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public boolean hasCacheCapacity() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public 
long getCacheCapacity() { return cacheCapacity_; } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public Builder setCacheCapacity(long value) { bitField0_ |= 0x00000200; cacheCapacity_ = value; onChanged(); return this; } /** * optional uint64 cacheCapacity = 11 [default = 0]; */ public Builder clearCacheCapacity() { bitField0_ = (bitField0_ & ~0x00000200); cacheCapacity_ = 0L; onChanged(); return this; } // optional uint64 cacheUsed = 12 [default = 0]; private long cacheUsed_ ; /** * optional uint64 cacheUsed = 12 [default = 0]; */ public boolean hasCacheUsed() { return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public long getCacheUsed() { return cacheUsed_; } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public Builder setCacheUsed(long value) { bitField0_ |= 0x00000400; cacheUsed_ = value; onChanged(); return this; } /** * optional uint64 cacheUsed = 12 [default = 0]; */ public Builder clearCacheUsed() { bitField0_ = (bitField0_ & ~0x00000400); cacheUsed_ = 0L; onChanged(); return this; } // optional uint64 lastUpdateMonotonic = 13 [default = 0]; private long lastUpdateMonotonic_ ; /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public boolean hasLastUpdateMonotonic() { return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public long getLastUpdateMonotonic() { return lastUpdateMonotonic_; } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public Builder setLastUpdateMonotonic(long value) { bitField0_ |= 0x00000800; lastUpdateMonotonic_ = value; onChanged(); return this; } /** * optional uint64 lastUpdateMonotonic = 13 [default = 0]; */ public Builder clearLastUpdateMonotonic() { bitField0_ = (bitField0_ & ~0x00000800); lastUpdateMonotonic_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfoProto) } static { defaultInstance = new DatanodeInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfoProto) } public interface DatanodeStorageProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string storageUuid = 1; /** * required string storageUuid = 1; */ boolean hasStorageUuid(); /** * required string storageUuid = 1; */ java.lang.String getStorageUuid(); /** * required string storageUuid = 1; */ com.google.protobuf.ByteString getStorageUuidBytes(); // optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ boolean hasState(); /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState(); // optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ boolean hasStorageType(); /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType(); } /** * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto} * *
   * <pre>
   **
   * Represents a storage available on the datanode
   * </pre>
*/ public static final class DatanodeStorageProto extends com.google.protobuf.GeneratedMessage implements DatanodeStorageProtoOrBuilder { // Use DatanodeStorageProto.newBuilder() to construct. private DatanodeStorageProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DatanodeStorageProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DatanodeStorageProto defaultInstance; public static DatanodeStorageProto getDefaultInstance() { return defaultInstance; } public DatanodeStorageProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeStorageProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; storageUuid_ = input.readBytes(); break; } case 16: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; state_ = value; } break; } case 24: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(3, rawValue); } else { bitField0_ |= 0x00000004; storageType_ = value; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DatanodeStorageProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DatanodeStorageProto(input, extensionRegistry); } 
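    // Editor's sketch: a DatanodeStorageProto only requires storageUuid;
    // state and storageType fall back to NORMAL and DISK. The setter names
    // follow the standard generated-builder pattern, and the UUID literal is
    // a made-up placeholder.
    //
    //   HdfsProtos.DatanodeStorageProto storage =
    //       HdfsProtos.DatanodeStorageProto.newBuilder()
    //           .setStorageUuid("DS-example-uuid")   // required string
    //           .setState(HdfsProtos.DatanodeStorageProto.StorageState.READ_ONLY_SHARED)
    //           .build();                            // storageType stays DISK
    //   HdfsProtos.DatanodeStorageProto parsed =
    //       HdfsProtos.DatanodeStorageProto.parseFrom(storage.toByteArray());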
}; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } /** * Protobuf enum {@code hadoop.hdfs.DatanodeStorageProto.StorageState} */ public enum StorageState implements com.google.protobuf.ProtocolMessageEnum { /** * NORMAL = 0; */ NORMAL(0, 0), /** * READ_ONLY_SHARED = 1; */ READ_ONLY_SHARED(1, 1), ; /** * NORMAL = 0; */ public static final int NORMAL_VALUE = 0; /** * READ_ONLY_SHARED = 1; */ public static final int READ_ONLY_SHARED_VALUE = 1; public final int getNumber() { return value; } public static StorageState valueOf(int value) { switch (value) { case 0: return NORMAL; case 1: return READ_ONLY_SHARED; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public StorageState findValueByNumber(int number) { return StorageState.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0); } private static final StorageState[] VALUES = values(); public static StorageState valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private StorageState(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeStorageProto.StorageState) } private int bitField0_; // required string storageUuid = 1; public static final int STORAGEUUID_FIELD_NUMBER = 1; private java.lang.Object storageUuid_; /** * required string storageUuid = 1; */ public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string storageUuid = 1; */ public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } } /** * required string storageUuid = 1; */ public com.google.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; public static final int STATE_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState state_; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() { return state_; } // optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; public static final int STORAGETYPE_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { return storageType_; } private void initFields() { storageUuid_ = ""; state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL; storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasStorageUuid()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getStorageUuidBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeEnum(2, state_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeEnum(3, storageType_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getStorageUuidBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(2, state_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(3, storageType_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) obj; boolean result = true; result = result && (hasStorageUuid() == other.hasStorageUuid()); if (hasStorageUuid()) { result = result && getStorageUuid() .equals(other.getStorageUuid()); } result = result && (hasState() == other.hasState()); if (hasState()) { result = result && (getState() == other.getState()); } result = result && (hasStorageType() == other.hasStorageType()); if (hasStorageType()) { result = result && (getStorageType() == other.getStorageType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { 
return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStorageUuid()) { hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER; hash = (53 * hash) + getStorageUuid().hashCode(); } if (hasState()) { hash = (37 * hash) + STATE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getState()); } if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getStorageType()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto} * *
     * <pre>
     **
     * Represents a storage available on the datanode
     * </pre>
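     *
     * A minimal construction sketch, assuming only the generated API in this
     * file (the UUID literal is a placeholder value):
     * <pre>
     * DatanodeStorageProto storage = DatanodeStorageProto.newBuilder()
     *     .setStorageUuid("example-uuid")                       // required field
     *     .setState(DatanodeStorageProto.StorageState.NORMAL)
     *     .setStorageType(StorageTypeProto.DISK)
     *     .build();   // build() throws if the required storageUuid is unset
     * </pre>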
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); storageUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000001); state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL; bitField0_ = (bitField0_ & ~0x00000002); storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.storageUuid_ = storageUuid_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.state_ = state_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.storageType_ = storageType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) return this; if (other.hasStorageUuid()) { bitField0_ |= 0x00000001; storageUuid_ = other.storageUuid_; onChanged(); } if (other.hasState()) { setState(other.getState()); } if (other.hasStorageType()) { setStorageType(other.getStorageType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasStorageUuid()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string storageUuid = 1; private java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1; */ public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string storageUuid = 1; */ public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); storageUuid_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string storageUuid = 1; */ public com.google.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string storageUuid = 1; */ public Builder setStorageUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } /** * required string storageUuid = 1; */ public Builder clearStorageUuid() { bitField0_ = (bitField0_ & ~0x00000001); storageUuid_ = getDefaultInstance().getStorageUuid(); onChanged(); return this; } /** * required string storageUuid = 1; */ public Builder setStorageUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } // optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL; /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() { return state_; } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public Builder 
setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; state_ = value; onChanged(); return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL]; */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000002); state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL; onChanged(); return this; } // optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { return storageType_; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; storageType_ = value; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK]; */ public Builder clearStorageType() { bitField0_ = (bitField0_ & ~0x00000004); storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageProto) } static { defaultInstance = new DatanodeStorageProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageProto) } public interface StorageReportProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string storageUuid = 1 [deprecated = true]; /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated boolean hasStorageUuid(); /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated java.lang.String getStorageUuid(); /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated com.google.protobuf.ByteString getStorageUuidBytes(); // optional bool failed = 2 [default = false]; /** * optional bool failed = 2 [default = false]; */ boolean hasFailed(); /** * optional bool failed = 2 [default = false]; */ boolean getFailed(); // optional uint64 capacity = 3 [default = 0]; /** * optional uint64 capacity = 3 [default = 0]; */ boolean hasCapacity(); /** * optional uint64 capacity = 3 [default = 0]; */ long getCapacity(); // optional uint64 dfsUsed = 4 [default = 0]; /** * optional uint64 dfsUsed = 4 [default = 0]; */ boolean hasDfsUsed(); /** * optional uint64 dfsUsed = 4 [default = 0]; */ long getDfsUsed(); // optional uint64 remaining = 5 [default = 0]; /** * optional uint64 remaining = 5 [default = 0]; */ boolean hasRemaining(); /** * optional uint64 remaining = 5 [default = 0]; */ long getRemaining(); // optional uint64 blockPoolUsed = 6 [default = 0]; /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ boolean hasBlockPoolUsed(); /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ long getBlockPoolUsed(); // optional .hadoop.hdfs.DatanodeStorageProto storage = 7; /** * 
optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
     * <pre>
     * supersedes StorageUuid
     * </pre>
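     *
     * Presence-check sketch, assuming a parsed report held in a hypothetical
     * variable {@code report}:
     * <pre>
     * String uuid = report.hasStorage()
     *     ? report.getStorage().getStorageUuid()   // preferred source
     *     : report.getStorageUuid();               // deprecated fallback
     * </pre>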
*/ boolean hasStorage(); /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
     * <pre>
     * supersedes StorageUuid
     * </pre>
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage(); /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
     * <pre>
     * supersedes StorageUuid
     * </pre>
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.StorageReportProto} */ public static final class StorageReportProto extends com.google.protobuf.GeneratedMessage implements StorageReportProtoOrBuilder { // Use StorageReportProto.newBuilder() to construct. private StorageReportProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StorageReportProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StorageReportProto defaultInstance; public static StorageReportProto getDefaultInstance() { return defaultInstance; } public StorageReportProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageReportProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; storageUuid_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; failed_ = input.readBool(); break; } case 24: { bitField0_ |= 0x00000004; capacity_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; dfsUsed_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; remaining_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; blockPoolUsed_ = input.readUInt64(); break; } case 58: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder subBuilder = null; if (((bitField0_ & 0x00000040) == 0x00000040)) { subBuilder = storage_.toBuilder(); } storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storage_); storage_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000040; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new 
com.google.protobuf.AbstractParser() { public StorageReportProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StorageReportProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string storageUuid = 1 [deprecated = true]; public static final int STORAGEUUID_FIELD_NUMBER = 1; private java.lang.Object storageUuid_; /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { storageUuid_ = s; } return s; } } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public com.google.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // optional bool failed = 2 [default = false]; public static final int FAILED_FIELD_NUMBER = 2; private boolean failed_; /** * optional bool failed = 2 [default = false]; */ public boolean hasFailed() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool failed = 2 [default = false]; */ public boolean getFailed() { return failed_; } // optional uint64 capacity = 3 [default = 0]; public static final int CAPACITY_FIELD_NUMBER = 3; private long capacity_; /** * optional uint64 capacity = 3 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 capacity = 3 [default = 0]; */ public long getCapacity() { return capacity_; } // optional uint64 dfsUsed = 4 [default = 0]; public static final int DFSUSED_FIELD_NUMBER = 4; private long dfsUsed_; /** * optional uint64 dfsUsed = 4 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } // optional uint64 remaining = 5 [default = 0]; public static final int REMAINING_FIELD_NUMBER = 5; private long remaining_; /** * optional uint64 remaining = 5 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint64 remaining = 5 [default = 0]; */ public long getRemaining() { return remaining_; } // optional uint64 blockPoolUsed = 6 [default = 0]; public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6; private long blockPoolUsed_; /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } // optional .hadoop.hdfs.DatanodeStorageProto storage = 7; public static final int STORAGE_FIELD_NUMBER = 7; private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_; /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
     * <pre>
     * supersedes StorageUuid
     * </pre>
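     *
     * Parsing sketch ({@code bytes} is a placeholder for a serialized report):
     * <pre>
     * StorageReportProto report = StorageReportProto.parseFrom(bytes);  // may throw InvalidProtocolBufferException
     * long remaining = report.getRemaining();
     * boolean failed = report.getFailed();
     * </pre>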
*/ public boolean hasStorage() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
     * <pre>
     * supersedes StorageUuid
     * </pre>
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() { return storage_; } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
     * <pre>
     * supersedes StorageUuid
     * </pre>
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() { return storage_; } private void initFields() { storageUuid_ = ""; failed_ = false; capacity_ = 0L; dfsUsed_ = 0L; remaining_ = 0L; blockPoolUsed_ = 0L; storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasStorageUuid()) { memoizedIsInitialized = 0; return false; } if (hasStorage()) { if (!getStorage().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getStorageUuidBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, failed_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, capacity_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, dfsUsed_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, remaining_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, blockPoolUsed_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeMessage(7, storage_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getStorageUuidBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, failed_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, capacity_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(4, dfsUsed_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(5, remaining_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, blockPoolUsed_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(7, storage_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) obj; boolean result = true; result = result && (hasStorageUuid() == other.hasStorageUuid()); if (hasStorageUuid()) { result = result && getStorageUuid() .equals(other.getStorageUuid()); } result = result && (hasFailed() == other.hasFailed()); if (hasFailed()) { result = result && (getFailed() == 
other.getFailed()); } result = result && (hasCapacity() == other.hasCapacity()); if (hasCapacity()) { result = result && (getCapacity() == other.getCapacity()); } result = result && (hasDfsUsed() == other.hasDfsUsed()); if (hasDfsUsed()) { result = result && (getDfsUsed() == other.getDfsUsed()); } result = result && (hasRemaining() == other.hasRemaining()); if (hasRemaining()) { result = result && (getRemaining() == other.getRemaining()); } result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed()); if (hasBlockPoolUsed()) { result = result && (getBlockPoolUsed() == other.getBlockPoolUsed()); } result = result && (hasStorage() == other.hasStorage()); if (hasStorage()) { result = result && getStorage() .equals(other.getStorage()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStorageUuid()) { hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER; hash = (53 * hash) + getStorageUuid().hashCode(); } if (hasFailed()) { hash = (37 * hash) + FAILED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getFailed()); } if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCapacity()); } if (hasDfsUsed()) { hash = (37 * hash) + DFSUSED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getDfsUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + hashLong(getRemaining()); } if (hasBlockPoolUsed()) { hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlockPoolUsed()); } if (hasStorage()) { hash = (37 * hash) + STORAGE_FIELD_NUMBER; hash = (53 * hash) + getStorage().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { 
return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageReportProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getStorageFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); storageUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000001); failed_ = false; bitField0_ = (bitField0_ & ~0x00000002); capacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); dfsUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); remaining_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); blockPoolUsed_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); if (storageBuilder_ == null) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor; } public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.storageUuid_ = storageUuid_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.failed_ = failed_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.capacity_ = capacity_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.dfsUsed_ = dfsUsed_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.remaining_ = remaining_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.blockPoolUsed_ = blockPoolUsed_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } if (storageBuilder_ == null) { result.storage_ = storage_; } else { result.storage_ = storageBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()) return this; if (other.hasStorageUuid()) { bitField0_ |= 0x00000001; storageUuid_ = other.storageUuid_; onChanged(); } if (other.hasFailed()) { setFailed(other.getFailed()); } if (other.hasCapacity()) { setCapacity(other.getCapacity()); } if (other.hasDfsUsed()) { setDfsUsed(other.getDfsUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasBlockPoolUsed()) { setBlockPoolUsed(other.getBlockPoolUsed()); } if (other.hasStorage()) { mergeStorage(other.getStorage()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasStorageUuid()) { return false; } if (hasStorage()) { if (!getStorage().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { 
mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string storageUuid = 1 [deprecated = true]; private java.lang.Object storageUuid_ = ""; /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public boolean hasStorageUuid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public java.lang.String getStorageUuid() { java.lang.Object ref = storageUuid_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); storageUuid_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public com.google.protobuf.ByteString getStorageUuidBytes() { java.lang.Object ref = storageUuid_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); storageUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public Builder setStorageUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public Builder clearStorageUuid() { bitField0_ = (bitField0_ & ~0x00000001); storageUuid_ = getDefaultInstance().getStorageUuid(); onChanged(); return this; } /** * required string storageUuid = 1 [deprecated = true]; */ @java.lang.Deprecated public Builder setStorageUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; storageUuid_ = value; onChanged(); return this; } // optional bool failed = 2 [default = false]; private boolean failed_ ; /** * optional bool failed = 2 [default = false]; */ public boolean hasFailed() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool failed = 2 [default = false]; */ public boolean getFailed() { return failed_; } /** * optional bool failed = 2 [default = false]; */ public Builder setFailed(boolean value) { bitField0_ |= 0x00000002; failed_ = value; onChanged(); return this; } /** * optional bool failed = 2 [default = false]; */ public Builder clearFailed() { bitField0_ = (bitField0_ & ~0x00000002); failed_ = false; onChanged(); return this; } // optional uint64 capacity = 3 [default = 0]; private long capacity_ ; /** * optional uint64 capacity = 3 [default = 0]; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 capacity = 3 [default = 0]; */ public long getCapacity() { return capacity_; } /** * optional uint64 capacity = 3 [default = 0]; */ public Builder setCapacity(long value) { bitField0_ |= 0x00000004; capacity_ = value; onChanged(); return this; } /** * optional uint64 capacity = 3 [default = 0]; */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000004); capacity_ = 0L; onChanged(); return this; } // optional uint64 dfsUsed = 4 [default = 0]; private long dfsUsed_ ; /** * optional uint64 dfsUsed = 4 [default = 0]; */ public boolean hasDfsUsed() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public long getDfsUsed() { return dfsUsed_; } /** * optional uint64 dfsUsed = 4 [default = 0]; */ 
public Builder setDfsUsed(long value) { bitField0_ |= 0x00000008; dfsUsed_ = value; onChanged(); return this; } /** * optional uint64 dfsUsed = 4 [default = 0]; */ public Builder clearDfsUsed() { bitField0_ = (bitField0_ & ~0x00000008); dfsUsed_ = 0L; onChanged(); return this; } // optional uint64 remaining = 5 [default = 0]; private long remaining_ ; /** * optional uint64 remaining = 5 [default = 0]; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint64 remaining = 5 [default = 0]; */ public long getRemaining() { return remaining_; } /** * optional uint64 remaining = 5 [default = 0]; */ public Builder setRemaining(long value) { bitField0_ |= 0x00000010; remaining_ = value; onChanged(); return this; } /** * optional uint64 remaining = 5 [default = 0]; */ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000010); remaining_ = 0L; onChanged(); return this; } // optional uint64 blockPoolUsed = 6 [default = 0]; private long blockPoolUsed_ ; /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public boolean hasBlockPoolUsed() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public long getBlockPoolUsed() { return blockPoolUsed_; } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public Builder setBlockPoolUsed(long value) { bitField0_ |= 0x00000020; blockPoolUsed_ = value; onChanged(); return this; } /** * optional uint64 blockPoolUsed = 6 [default = 0]; */ public Builder clearBlockPoolUsed() { bitField0_ = (bitField0_ & ~0x00000020); blockPoolUsed_ = 0L; onChanged(); return this; } // optional .hadoop.hdfs.DatanodeStorageProto storage = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_; /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
*/ public boolean hasStorage() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() { if (storageBuilder_ == null) { return storage_; } else { return storageBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
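       *
       * Setter sketch, assuming a previously built DatanodeStorageProto held
       * in a hypothetical variable {@code ds}:
       * <pre>
       * StorageReportProto report = StorageReportProto.newBuilder()
       *     .setStorageUuid(ds.getStorageUuid())   // required; kept for old readers
       *     .setCapacity(1024L)
       *     .setStorage(ds)
       *     .build();
       * </pre>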
*/ public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) { if (storageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storage_ = value; onChanged(); } else { storageBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
*/ public Builder setStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) { if (storageBuilder_ == null) { storage_ = builderForValue.build(); onChanged(); } else { storageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
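       *
       * Merge semantics sketch, assuming a StorageReportProto.Builder named
       * {@code builder} and two built values {@code a} and {@code b}: fields
       * explicitly set on {@code b} overwrite those of {@code a}, while unset
       * fields keep {@code a}'s values.
       * <pre>
       * builder.setStorage(a);
       * builder.mergeStorage(b);   // merges b into a instead of replacing it
       * </pre>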
*/ public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) { if (storageBuilder_ == null) { if (((bitField0_ & 0x00000040) == 0x00000040) && storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial(); } else { storage_ = value; } onChanged(); } else { storageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
*/ public Builder clearStorage() { if (storageBuilder_ == null) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance(); onChanged(); } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
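       *
       * In-place edit via the nested builder (sketch; the UUID literals are
       * placeholders). Calling getStorageBuilder() marks the field as present:
       * <pre>
       * StorageReportProto.Builder b = StorageReportProto.newBuilder()
       *     .setStorageUuid("example-uuid");
       * b.getStorageBuilder()
       *     .setStorageUuid("example-uuid");   // nested required field
       * StorageReportProto report = b.build(); // fails if the nested message is uninitialized
       * </pre>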
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() { bitField0_ |= 0x00000040; onChanged(); return getStorageFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() { if (storageBuilder_ != null) { return storageBuilder_.getMessageOrBuilder(); } else { return storage_; } } /** * optional .hadoop.hdfs.DatanodeStorageProto storage = 7; * *
       * <pre>
       * supersedes StorageUuid
       * </pre>
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> getStorageFieldBuilder() { if (storageBuilder_ == null) { storageBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>( storage_, getParentForChildren(), isClean()); storage_ = null; } return storageBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageReportProto) } static { defaultInstance = new StorageReportProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageReportProto) } public interface ContentSummaryProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 length = 1; /** * required uint64 length = 1; */ boolean hasLength(); /** * required uint64 length = 1; */ long getLength(); // required uint64 fileCount = 2; /** * required uint64 fileCount = 2; */ boolean hasFileCount(); /** * required uint64 fileCount = 2; */ long getFileCount(); // required uint64 directoryCount = 3; /** * required uint64 directoryCount = 3; */ boolean hasDirectoryCount(); /** * required uint64 directoryCount = 3; */ long getDirectoryCount(); // required uint64 quota = 4; /** * required uint64 quota = 4; */ boolean hasQuota(); /** * required uint64 quota = 4; */ long getQuota(); // required uint64 spaceConsumed = 5; /** * required uint64 spaceConsumed = 5; */ boolean hasSpaceConsumed(); /** * required uint64 spaceConsumed = 5; */ long getSpaceConsumed(); // required uint64 spaceQuota = 6; /** * required uint64 spaceQuota = 6; */ boolean hasSpaceQuota(); /** * required uint64 spaceQuota = 6; */ long getSpaceQuota(); // optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ boolean hasTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos(); /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.ContentSummaryProto} * *
    * <pre>
    **
    * Summary of a file or directory
    * </pre>
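    *
    * Field-usage sketch, assuming a parsed summary held in a hypothetical
    * variable {@code summary}; in HDFS the namespace quota counts files plus
    * directories:
    * <pre>
    * long usedNames      = summary.getFileCount() + summary.getDirectoryCount();
    * long remainingNames = summary.getQuota() - usedNames;
    * long remainingSpace = summary.getSpaceQuota() - summary.getSpaceConsumed();
    * </pre>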
*/ public static final class ContentSummaryProto extends com.google.protobuf.GeneratedMessage implements ContentSummaryProtoOrBuilder { // Use ContentSummaryProto.newBuilder() to construct. private ContentSummaryProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ContentSummaryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ContentSummaryProto defaultInstance; public static ContentSummaryProto getDefaultInstance() { return defaultInstance; } public ContentSummaryProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ContentSummaryProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; length_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; fileCount_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; directoryCount_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; quota_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; spaceConsumed_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; spaceQuota_ = input.readUInt64(); break; } case 58: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null; if (((bitField0_ & 0x00000040) == 0x00000040)) { subBuilder = typeQuotaInfos_.toBuilder(); } typeQuotaInfos_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(typeQuotaInfos_); typeQuotaInfos_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000040; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public ContentSummaryProto parsePartialFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ContentSummaryProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 length = 1; public static final int LENGTH_FIELD_NUMBER = 1; private long length_; /** * required uint64 length = 1; */ public boolean hasLength() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 length = 1; */ public long getLength() { return length_; } // required uint64 fileCount = 2; public static final int FILECOUNT_FIELD_NUMBER = 2; private long fileCount_; /** * required uint64 fileCount = 2; */ public boolean hasFileCount() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 fileCount = 2; */ public long getFileCount() { return fileCount_; } // required uint64 directoryCount = 3; public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3; private long directoryCount_; /** * required uint64 directoryCount = 3; */ public boolean hasDirectoryCount() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 directoryCount = 3; */ public long getDirectoryCount() { return directoryCount_; } // required uint64 quota = 4; public static final int QUOTA_FIELD_NUMBER = 4; private long quota_; /** * required uint64 quota = 4; */ public boolean hasQuota() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 quota = 4; */ public long getQuota() { return quota_; } // required uint64 spaceConsumed = 5; public static final int SPACECONSUMED_FIELD_NUMBER = 5; private long spaceConsumed_; /** * required uint64 spaceConsumed = 5; */ public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 spaceConsumed = 5; */ public long getSpaceConsumed() { return spaceConsumed_; } // required uint64 spaceQuota = 6; public static final int SPACEQUOTA_FIELD_NUMBER = 6; private long spaceQuota_; /** * required uint64 spaceQuota = 6; */ public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint64 spaceQuota = 6; */ public long getSpaceQuota() { return spaceQuota_; } // optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { return typeQuotaInfos_; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { return typeQuotaInfos_; } private void initFields() { length_ = 0L; fileCount_ = 0L; directoryCount_ = 0L; quota_ = 0L; spaceConsumed_ = 0L; spaceQuota_ = 0L; typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLength()) { 
memoizedIsInitialized = 0; return false; } if (!hasFileCount()) { memoizedIsInitialized = 0; return false; } if (!hasDirectoryCount()) { memoizedIsInitialized = 0; return false; } if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceConsumed()) { memoizedIsInitialized = 0; return false; } if (!hasSpaceQuota()) { memoizedIsInitialized = 0; return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, length_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, fileCount_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, directoryCount_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, quota_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, spaceConsumed_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, spaceQuota_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeMessage(7, typeQuotaInfos_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, length_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, fileCount_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, directoryCount_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(4, quota_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(5, spaceConsumed_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, spaceQuota_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(7, typeQuotaInfos_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj; boolean result = true; result = result && (hasLength() == other.hasLength()); if (hasLength()) { result = result && (getLength() == other.getLength()); } result = result && (hasFileCount() == other.hasFileCount()); if (hasFileCount()) { result = result && (getFileCount() == other.getFileCount()); } result = result && (hasDirectoryCount() == other.hasDirectoryCount()); if (hasDirectoryCount()) { result = result && (getDirectoryCount() == other.getDirectoryCount()); } result = result && (hasQuota() == 
other.hasQuota()); if (hasQuota()) { result = result && (getQuota() == other.getQuota()); } result = result && (hasSpaceConsumed() == other.hasSpaceConsumed()); if (hasSpaceConsumed()) { result = result && (getSpaceConsumed() == other.getSpaceConsumed()); } result = result && (hasSpaceQuota() == other.hasSpaceQuota()); if (hasSpaceQuota()) { result = result && (getSpaceQuota() == other.getSpaceQuota()); } result = result && (hasTypeQuotaInfos() == other.hasTypeQuotaInfos()); if (hasTypeQuotaInfos()) { result = result && getTypeQuotaInfos() .equals(other.getTypeQuotaInfos()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLength()); } if (hasFileCount()) { hash = (37 * hash) + FILECOUNT_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileCount()); } if (hasDirectoryCount()) { hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER; hash = (53 * hash) + hashLong(getDirectoryCount()); } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + hashLong(getQuota()); } if (hasSpaceConsumed()) { hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getSpaceConsumed()); } if (hasSpaceQuota()) { hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER; hash = (53 * hash) + hashLong(getSpaceQuota()); } if (hasTypeQuotaInfos()) { hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfos().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto 
parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
     *
     * <pre>
     **
     * Summary of a file or directory
     * </pre>
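     *
     * A minimal usage sketch (illustrative, not generated code; the field
     * values are hypothetical):
     * <pre>{@code
     * HdfsProtos.ContentSummaryProto summary =
     *     HdfsProtos.ContentSummaryProto.newBuilder()
     *         .setLength(1024L)           // total bytes under the path
     *         .setFileCount(3L)
     *         .setDirectoryCount(1L)
     *         .setQuota(100000L)          // namespace quota
     *         .setSpaceConsumed(3072L)
     *         .setSpaceQuota(1048576L)    // disk-space quota in bytes
     *         .build();                   // throws if a required field is unset
     *
     * // Serialize and parse back (wire round trip).
     * byte[] bytes = summary.toByteArray();
     * HdfsProtos.ContentSummaryProto parsed =
     *     HdfsProtos.ContentSummaryProto.parseFrom(bytes);
     * }</pre>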
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getTypeQuotaInfosFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); fileCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); directoryCount_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); quota_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); spaceConsumed_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); spaceQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); } else { typeQuotaInfosBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.length_ = length_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.fileCount_ = fileCount_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.directoryCount_ = directoryCount_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.quota_ = quota_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } 
result.spaceConsumed_ = spaceConsumed_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.spaceQuota_ = spaceQuota_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } if (typeQuotaInfosBuilder_ == null) { result.typeQuotaInfos_ = typeQuotaInfos_; } else { result.typeQuotaInfos_ = typeQuotaInfosBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this; if (other.hasLength()) { setLength(other.getLength()); } if (other.hasFileCount()) { setFileCount(other.getFileCount()); } if (other.hasDirectoryCount()) { setDirectoryCount(other.getDirectoryCount()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasSpaceConsumed()) { setSpaceConsumed(other.getSpaceConsumed()); } if (other.hasSpaceQuota()) { setSpaceQuota(other.getSpaceQuota()); } if (other.hasTypeQuotaInfos()) { mergeTypeQuotaInfos(other.getTypeQuotaInfos()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLength()) { return false; } if (!hasFileCount()) { return false; } if (!hasDirectoryCount()) { return false; } if (!hasQuota()) { return false; } if (!hasSpaceConsumed()) { return false; } if (!hasSpaceQuota()) { return false; } if (hasTypeQuotaInfos()) { if (!getTypeQuotaInfos().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 length = 1; private long length_ ; /** * required uint64 length = 1; */ public boolean hasLength() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 length = 1; */ public long getLength() { return length_; } /** * required uint64 length = 1; */ public Builder setLength(long value) { bitField0_ |= 0x00000001; length_ = value; onChanged(); return this; } /** * required uint64 length = 1; */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000001); length_ = 0L; onChanged(); return this; } // required uint64 fileCount = 2; private long fileCount_ ; /** * required uint64 fileCount = 2; */ public boolean hasFileCount() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 fileCount = 2; */ public long getFileCount() { return fileCount_; } /** * required uint64 fileCount = 2; */ public Builder setFileCount(long value) { bitField0_ |= 0x00000002; fileCount_ = value; onChanged(); return this; } /** * required uint64 fileCount = 2; */ 
public Builder clearFileCount() { bitField0_ = (bitField0_ & ~0x00000002); fileCount_ = 0L; onChanged(); return this; } // required uint64 directoryCount = 3; private long directoryCount_ ; /** * required uint64 directoryCount = 3; */ public boolean hasDirectoryCount() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 directoryCount = 3; */ public long getDirectoryCount() { return directoryCount_; } /** * required uint64 directoryCount = 3; */ public Builder setDirectoryCount(long value) { bitField0_ |= 0x00000004; directoryCount_ = value; onChanged(); return this; } /** * required uint64 directoryCount = 3; */ public Builder clearDirectoryCount() { bitField0_ = (bitField0_ & ~0x00000004); directoryCount_ = 0L; onChanged(); return this; } // required uint64 quota = 4; private long quota_ ; /** * required uint64 quota = 4; */ public boolean hasQuota() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 quota = 4; */ public long getQuota() { return quota_; } /** * required uint64 quota = 4; */ public Builder setQuota(long value) { bitField0_ |= 0x00000008; quota_ = value; onChanged(); return this; } /** * required uint64 quota = 4; */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000008); quota_ = 0L; onChanged(); return this; } // required uint64 spaceConsumed = 5; private long spaceConsumed_ ; /** * required uint64 spaceConsumed = 5; */ public boolean hasSpaceConsumed() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 spaceConsumed = 5; */ public long getSpaceConsumed() { return spaceConsumed_; } /** * required uint64 spaceConsumed = 5; */ public Builder setSpaceConsumed(long value) { bitField0_ |= 0x00000010; spaceConsumed_ = value; onChanged(); return this; } /** * required uint64 spaceConsumed = 5; */ public Builder clearSpaceConsumed() { bitField0_ = (bitField0_ & ~0x00000010); spaceConsumed_ = 0L; onChanged(); return this; } // required uint64 spaceQuota = 6; private long spaceQuota_ ; /** * required uint64 spaceQuota = 6; */ public boolean hasSpaceQuota() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint64 spaceQuota = 6; */ public long getSpaceQuota() { return spaceQuota_; } /** * required uint64 spaceQuota = 6; */ public Builder setSpaceQuota(long value) { bitField0_ |= 0x00000020; spaceQuota_ = value; onChanged(); return this; } /** * required uint64 spaceQuota = 6; */ public Builder clearSpaceQuota() { bitField0_ = (bitField0_ & ~0x00000020); spaceQuota_ = 0L; onChanged(); return this; } // optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_; /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public boolean hasTypeQuotaInfos() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { return 
typeQuotaInfos_; } else { return typeQuotaInfosBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeQuotaInfos_ = value; onChanged(); } else { typeQuotaInfosBuilder_.setMessage(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder setTypeQuotaInfos( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = builderForValue.build(); onChanged(); } else { typeQuotaInfosBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) { if (typeQuotaInfosBuilder_ == null) { if (((bitField0_ & 0x00000040) == 0x00000040) && typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) { typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(typeQuotaInfos_).mergeFrom(value).buildPartial(); } else { typeQuotaInfos_ = value; } onChanged(); } else { typeQuotaInfosBuilder_.mergeFrom(value); } bitField0_ |= 0x00000040; return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public Builder clearTypeQuotaInfos() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); onChanged(); } else { typeQuotaInfosBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); return this; } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() { bitField0_ |= 0x00000040; onChanged(); return getTypeQuotaInfosFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() { if (typeQuotaInfosBuilder_ != null) { return typeQuotaInfosBuilder_.getMessageOrBuilder(); } else { return typeQuotaInfos_; } } /** * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> getTypeQuotaInfosFieldBuilder() { if (typeQuotaInfosBuilder_ == null) { typeQuotaInfosBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>( typeQuotaInfos_, getParentForChildren(), isClean()); typeQuotaInfos_ = null; } return typeQuotaInfosBuilder_; } // 
@@protoc_insertion_point(builder_scope:hadoop.hdfs.ContentSummaryProto)
    }

    static {
      defaultInstance = new ContentSummaryProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ContentSummaryProto)
  }

  public interface StorageTypeQuotaInfosProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
    /**
     * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>
        getTypeQuotaInfoList();
    /**
     * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index);
    /**
     * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
     */
    int getTypeQuotaInfoCount();
    /**
     * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
        getTypeQuotaInfoOrBuilderList();
    /**
     * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
        int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
   *
   * <pre>
   **
   * Storage type quota and usage information of a file or directory
   * </pre>
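   *
   * A usage sketch (illustrative, not generated code; the numeric values
   * are hypothetical):
   * <pre>{@code
   * HdfsProtos.StorageTypeQuotaInfosProto infos =
   *     HdfsProtos.StorageTypeQuotaInfosProto.newBuilder()
   *         .addTypeQuotaInfo(
   *             HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
   *                 .setType(HdfsProtos.StorageTypeProto.SSD)
   *                 .setQuota(1048576L)
   *                 .setConsumed(4096L))
   *         .build();
   * for (HdfsProtos.StorageTypeQuotaInfoProto info : infos.getTypeQuotaInfoList()) {
   *   System.out.println(info.getType() + " consumed " + info.getConsumed());
   * }
   * }</pre>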
*/ public static final class StorageTypeQuotaInfosProto extends com.google.protobuf.GeneratedMessage implements StorageTypeQuotaInfosProtoOrBuilder { // Use StorageTypeQuotaInfosProto.newBuilder() to construct. private StorageTypeQuotaInfosProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StorageTypeQuotaInfosProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StorageTypeQuotaInfosProto defaultInstance; public static StorageTypeQuotaInfosProto getDefaultInstance() { return defaultInstance; } public StorageTypeQuotaInfosProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageTypeQuotaInfosProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { typeQuotaInfo_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } typeQuotaInfo_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StorageTypeQuotaInfosProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StorageTypeQuotaInfosProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; public static final int TYPEQUOTAINFO_FIELD_NUMBER = 1; private java.util.List typeQuotaInfo_; 
/** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoList() { return typeQuotaInfo_; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoOrBuilderList() { return typeQuotaInfo_; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public int getTypeQuotaInfoCount() { return typeQuotaInfo_.size(); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) { return typeQuotaInfo_.get(index); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index) { return typeQuotaInfo_.get(index); } private void initFields() { typeQuotaInfo_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getTypeQuotaInfoCount(); i++) { if (!getTypeQuotaInfo(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < typeQuotaInfo_.size(); i++) { output.writeMessage(1, typeQuotaInfo_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < typeQuotaInfo_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, typeQuotaInfo_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) obj; boolean result = true; result = result && getTypeQuotaInfoList() .equals(other.getTypeQuotaInfoList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getTypeQuotaInfoCount() > 0) { hash = (37 * hash) + TYPEQUOTAINFO_FIELD_NUMBER; hash = (53 * hash) + getTypeQuotaInfoList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( 
com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto} * *
     * <pre>
     **
     * Storage type quota and usage information of a file or directory
     * </pre>
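     *
     * The repeated field can also be populated in place through its nested
     * builders (illustrative sketch, not generated code; values are
     * hypothetical):
     * <pre>{@code
     * HdfsProtos.StorageTypeQuotaInfosProto.Builder builder =
     *     HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
     * builder.addTypeQuotaInfoBuilder()   // appends an entry, returns its builder
     *     .setType(HdfsProtos.StorageTypeProto.DISK)
     *     .setQuota(2048L)
     *     .setConsumed(512L);
     * HdfsProtos.StorageTypeQuotaInfosProto infos = builder.build();
     * }</pre>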
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getTypeQuotaInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (typeQuotaInfoBuilder_ == null) { typeQuotaInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { typeQuotaInfoBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(this); int from_bitField0_ = bitField0_; if (typeQuotaInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_); bitField0_ = (bitField0_ & ~0x00000001); } result.typeQuotaInfo_ = typeQuotaInfo_; } else { result.typeQuotaInfo_ = typeQuotaInfoBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) return this; if 
(typeQuotaInfoBuilder_ == null) { if (!other.typeQuotaInfo_.isEmpty()) { if (typeQuotaInfo_.isEmpty()) { typeQuotaInfo_ = other.typeQuotaInfo_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.addAll(other.typeQuotaInfo_); } onChanged(); } } else { if (!other.typeQuotaInfo_.isEmpty()) { if (typeQuotaInfoBuilder_.isEmpty()) { typeQuotaInfoBuilder_.dispose(); typeQuotaInfoBuilder_ = null; typeQuotaInfo_ = other.typeQuotaInfo_; bitField0_ = (bitField0_ & ~0x00000001); typeQuotaInfoBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getTypeQuotaInfoFieldBuilder() : null; } else { typeQuotaInfoBuilder_.addAllMessages(other.typeQuotaInfo_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getTypeQuotaInfoCount(); i++) { if (!getTypeQuotaInfo(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; private java.util.List typeQuotaInfo_ = java.util.Collections.emptyList(); private void ensureTypeQuotaInfoIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { typeQuotaInfo_ = new java.util.ArrayList(typeQuotaInfo_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> typeQuotaInfoBuilder_; /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoList() { if (typeQuotaInfoBuilder_ == null) { return java.util.Collections.unmodifiableList(typeQuotaInfo_); } else { return typeQuotaInfoBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public int getTypeQuotaInfoCount() { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.size(); } else { return typeQuotaInfoBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.get(index); } else { return typeQuotaInfoBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder setTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.set(index, value); onChanged(); } else { typeQuotaInfoBuilder_.setMessage(index, value); } return this; } /** * repeated 
.hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder setTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.set(index, builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(value); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) { if (typeQuotaInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(index, value); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addTypeQuotaInfo( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.add(index, builderForValue.build()); onChanged(); } else { typeQuotaInfoBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder addAllTypeQuotaInfo( java.lang.Iterable values) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); super.addAll(values, typeQuotaInfo_); onChanged(); } else { typeQuotaInfoBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder clearTypeQuotaInfo() { if (typeQuotaInfoBuilder_ == null) { typeQuotaInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { typeQuotaInfoBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public Builder removeTypeQuotaInfo(int index) { if (typeQuotaInfoBuilder_ == null) { ensureTypeQuotaInfoIsMutable(); typeQuotaInfo_.remove(index); onChanged(); } else { typeQuotaInfoBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder getTypeQuotaInfoBuilder( int index) { return getTypeQuotaInfoFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder( int index) { if (typeQuotaInfoBuilder_ == null) { return typeQuotaInfo_.get(index); } else { return typeQuotaInfoBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoOrBuilderList() { if (typeQuotaInfoBuilder_ != null) { return typeQuotaInfoBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(typeQuotaInfo_); } } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder() { return getTypeQuotaInfoFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder( int index) { return getTypeQuotaInfoFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1; */ public java.util.List getTypeQuotaInfoBuilderList() { return getTypeQuotaInfoFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> getTypeQuotaInfoFieldBuilder() { if (typeQuotaInfoBuilder_ == null) { typeQuotaInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>( typeQuotaInfo_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); typeQuotaInfo_ = null; } return typeQuotaInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfosProto) } static { defaultInstance = new StorageTypeQuotaInfosProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfosProto) } public interface StorageTypeQuotaInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.StorageTypeProto type = 1; /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ boolean hasType(); /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType(); // required uint64 quota = 2; /** * required uint64 quota = 2; */ boolean hasQuota(); /** * required uint64 quota = 2; */ long getQuota(); // required uint64 consumed = 3; /** * required uint64 consumed = 3; */ boolean hasConsumed(); /** * required uint64 consumed = 3; */ long getConsumed(); } /** * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto} */ public static final class StorageTypeQuotaInfoProto extends com.google.protobuf.GeneratedMessage implements StorageTypeQuotaInfoProtoOrBuilder { // Use StorageTypeQuotaInfoProto.newBuilder() to construct. 
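    // Illustrative usage sketch (comment only, not part of the generated
    // sources); the numeric values are hypothetical:
    //
    //   HdfsProtos.StorageTypeQuotaInfoProto info =
    //       HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
    //           .setType(HdfsProtos.StorageTypeProto.ARCHIVE)
    //           .setQuota(8192L)      // required uint64 quota = 2
    //           .setConsumed(1024L)   // required uint64 consumed = 3
    //           .build();             // build() verifies all required fields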
private StorageTypeQuotaInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StorageTypeQuotaInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StorageTypeQuotaInfoProto defaultInstance; public static StorageTypeQuotaInfoProto getDefaultInstance() { return defaultInstance; } public StorageTypeQuotaInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageTypeQuotaInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; type_ = value; } break; } case 16: { bitField0_ |= 0x00000002; quota_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; consumed_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StorageTypeQuotaInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StorageTypeQuotaInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.StorageTypeProto type = 1; public static final int TYPE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto type_; /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) == 0x00000001); 
} /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() { return type_; } // required uint64 quota = 2; public static final int QUOTA_FIELD_NUMBER = 2; private long quota_; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } // required uint64 consumed = 3; public static final int CONSUMED_FIELD_NUMBER = 3; private long consumed_; /** * required uint64 consumed = 3; */ public boolean hasConsumed() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 consumed = 3; */ public long getConsumed() { return consumed_; } private void initFields() { type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; quota_ = 0L; consumed_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasType()) { memoizedIsInitialized = 0; return false; } if (!hasQuota()) { memoizedIsInitialized = 0; return false; } if (!hasConsumed()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, type_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, quota_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, consumed_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, type_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, quota_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, consumed_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) obj; boolean result = true; result = result && (hasType() == other.hasType()); if (hasType()) { result = result && (getType() == other.getType()); } result = result && (hasQuota() == other.hasQuota()); if (hasQuota()) { result = result && (getQuota() == other.getQuota()); } result = result && (hasConsumed() == other.hasConsumed()); if (hasConsumed()) { result = result && (getConsumed() == other.getConsumed()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { 
return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getType()); } if (hasQuota()) { hash = (37 * hash) + QUOTA_FIELD_NUMBER; hash = (53 * hash) + hashLong(getQuota()); } if (hasConsumed()) { hash = (37 * hash) + CONSUMED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getConsumed()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code 
hadoop.hdfs.StorageTypeQuotaInfoProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; bitField0_ = (bitField0_ & ~0x00000001); quota_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); consumed_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.type_ = type_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.quota_ = quota_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.consumed_ = consumed_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other) { if 
(other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } if (other.hasQuota()) { setQuota(other.getQuota()); } if (other.hasConsumed()) { setConsumed(other.getConsumed()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasType()) { return false; } if (!hasQuota()) { return false; } if (!hasConsumed()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.StorageTypeProto type = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() { return type_; } /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.StorageTypeProto type = 1; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; onChanged(); return this; } // required uint64 quota = 2; private long quota_ ; /** * required uint64 quota = 2; */ public boolean hasQuota() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 quota = 2; */ public long getQuota() { return quota_; } /** * required uint64 quota = 2; */ public Builder setQuota(long value) { bitField0_ |= 0x00000002; quota_ = value; onChanged(); return this; } /** * required uint64 quota = 2; */ public Builder clearQuota() { bitField0_ = (bitField0_ & ~0x00000002); quota_ = 0L; onChanged(); return this; } // required uint64 consumed = 3; private long consumed_ ; /** * required uint64 consumed = 3; */ public boolean hasConsumed() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 consumed = 3; */ public long getConsumed() { return consumed_; } /** * required uint64 consumed = 3; */ public Builder setConsumed(long value) { bitField0_ |= 0x00000004; consumed_ = value; onChanged(); return this; } /** * required uint64 consumed = 3; */ public Builder clearConsumed() { bitField0_ = (bitField0_ & ~0x00000004); consumed_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfoProto) } static { defaultInstance = new StorageTypeQuotaInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfoProto) } public 
interface CorruptFileBlocksProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated string files = 1; /** * repeated string files = 1; */ java.util.List getFilesList(); /** * repeated string files = 1; */ int getFilesCount(); /** * repeated string files = 1; */ java.lang.String getFiles(int index); /** * repeated string files = 1; */ com.google.protobuf.ByteString getFilesBytes(int index); // required string cookie = 2; /** * required string cookie = 2; */ boolean hasCookie(); /** * required string cookie = 2; */ java.lang.String getCookie(); /** * required string cookie = 2; */ com.google.protobuf.ByteString getCookieBytes(); } /** * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto} * *
   **
   * Contains a list of paths corresponding to corrupt files and a cookie
   * used for iterative calls to NameNode.listCorruptFileBlocks.
   * 
*/ public static final class CorruptFileBlocksProto extends com.google.protobuf.GeneratedMessage implements CorruptFileBlocksProtoOrBuilder { // Use CorruptFileBlocksProto.newBuilder() to construct. private CorruptFileBlocksProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CorruptFileBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CorruptFileBlocksProto defaultInstance; public static CorruptFileBlocksProto getDefaultInstance() { return defaultInstance; } public CorruptFileBlocksProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CorruptFileBlocksProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { files_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000001; } files_.add(input.readBytes()); break; } case 18: { bitField0_ |= 0x00000001; cookie_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { files_ = new com.google.protobuf.UnmodifiableLazyStringList(files_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public CorruptFileBlocksProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new CorruptFileBlocksProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // repeated string files = 1; public static final int FILES_FIELD_NUMBER = 1; private com.google.protobuf.LazyStringList files_; /** * repeated string files = 1; */ public java.util.List getFilesList() { return files_; } /** * 
repeated string files = 1; */ public int getFilesCount() { return files_.size(); } /** * repeated string files = 1; */ public java.lang.String getFiles(int index) { return files_.get(index); } /** * repeated string files = 1; */ public com.google.protobuf.ByteString getFilesBytes(int index) { return files_.getByteString(index); } // required string cookie = 2; public static final int COOKIE_FIELD_NUMBER = 2; private java.lang.Object cookie_; /** * required string cookie = 2; */ public boolean hasCookie() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string cookie = 2; */ public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { cookie_ = s; } return s; } } /** * required string cookie = 2; */ public com.google.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { files_ = com.google.protobuf.LazyStringArrayList.EMPTY; cookie_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasCookie()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < files_.size(); i++) { output.writeBytes(1, files_.getByteString(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(2, getCookieBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < files_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(files_.getByteString(i)); } size += dataSize; size += 1 * getFilesList().size(); } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getCookieBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj; boolean result = true; result = result && getFilesList() .equals(other.getFilesList()); result = result && (hasCookie() == other.hasCookie()); if (hasCookie()) { result = result && getCookie() .equals(other.getCookie()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if 
(memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getFilesCount() > 0) { hash = (37 * hash) + FILES_FIELD_NUMBER; hash = (53 * hash) + getFilesList().hashCode(); } if (hasCookie()) { hash = (37 * hash) + COOKIE_FIELD_NUMBER; hash = (53 * hash) + getCookie().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto} * *
     **
     * Contains a list of paths corresponding to corrupt files and a cookie
     * used for iterative calls to NameNode.listCorruptFileBlocks.
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); files_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); cookie_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { files_ = new com.google.protobuf.UnmodifiableLazyStringList( files_); bitField0_ = (bitField0_ & ~0x00000001); } result.files_ = files_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } result.cookie_ = cookie_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this; if (!other.files_.isEmpty()) { if (files_.isEmpty()) { files_ = other.files_; bitField0_ = 
(bitField0_ & ~0x00000001); } else { ensureFilesIsMutable(); files_.addAll(other.files_); } onChanged(); } if (other.hasCookie()) { bitField0_ |= 0x00000002; cookie_ = other.cookie_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasCookie()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated string files = 1; private com.google.protobuf.LazyStringList files_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureFilesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { files_ = new com.google.protobuf.LazyStringArrayList(files_); bitField0_ |= 0x00000001; } } /** * repeated string files = 1; */ public java.util.List getFilesList() { return java.util.Collections.unmodifiableList(files_); } /** * repeated string files = 1; */ public int getFilesCount() { return files_.size(); } /** * repeated string files = 1; */ public java.lang.String getFiles(int index) { return files_.get(index); } /** * repeated string files = 1; */ public com.google.protobuf.ByteString getFilesBytes(int index) { return files_.getByteString(index); } /** * repeated string files = 1; */ public Builder setFiles( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.set(index, value); onChanged(); return this; } /** * repeated string files = 1; */ public Builder addFiles( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.add(value); onChanged(); return this; } /** * repeated string files = 1; */ public Builder addAllFiles( java.lang.Iterable values) { ensureFilesIsMutable(); super.addAll(values, files_); onChanged(); return this; } /** * repeated string files = 1; */ public Builder clearFiles() { files_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string files = 1; */ public Builder addFilesBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureFilesIsMutable(); files_.add(value); onChanged(); return this; } // required string cookie = 2; private java.lang.Object cookie_ = ""; /** * required string cookie = 2; */ public boolean hasCookie() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string cookie = 2; */ public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); cookie_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string cookie = 2; */ public com.google.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); 
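      // Editorial note: a usage sketch for CorruptFileBlocksProto, not part of
      // the generated file. The paths are hypothetical; only cookie is
      // required, and it is the opaque token fed back into
      // NameNode.listCorruptFileBlocks to continue iteration.
      //
      //   HdfsProtos.CorruptFileBlocksProto corrupt =
      //       HdfsProtos.CorruptFileBlocksProto.newBuilder()
      //           .addFiles("/user/alice/part-00000")  // repeated, may be empty
      //           .addFiles("/user/alice/part-00001")
      //           .setCookie("0")                      // required
      //           .build();
      //   String next = corrupt.getCookie();           // pass back on the next call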
cookie_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string cookie = 2; */ public Builder setCookie( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cookie_ = value; onChanged(); return this; } /** * required string cookie = 2; */ public Builder clearCookie() { bitField0_ = (bitField0_ & ~0x00000002); cookie_ = getDefaultInstance().getCookie(); onChanged(); return this; } /** * required string cookie = 2; */ public Builder setCookieBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cookie_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CorruptFileBlocksProto) } static { defaultInstance = new CorruptFileBlocksProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CorruptFileBlocksProto) } public interface FsPermissionProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 perm = 1; /** * required uint32 perm = 1; * *
      * Actually a short - only 16 bits are used
     * 
*/ boolean hasPerm(); /** * required uint32 perm = 1; * *
      * Actually a short - only 16 bits are used
     * 
*/ int getPerm(); } /** * Protobuf type {@code hadoop.hdfs.FsPermissionProto} * *
   **
    * File or directory permission - same spec as POSIX
   * 
*/ public static final class FsPermissionProto extends com.google.protobuf.GeneratedMessage implements FsPermissionProtoOrBuilder { // Use FsPermissionProto.newBuilder() to construct. private FsPermissionProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FsPermissionProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FsPermissionProto defaultInstance; public static FsPermissionProto getDefaultInstance() { return defaultInstance; } public FsPermissionProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FsPermissionProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; perm_ = input.readUInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public FsPermissionProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new FsPermissionProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint32 perm = 1; public static final int PERM_FIELD_NUMBER = 1; private int perm_; /** * required uint32 perm = 1; * *
      * Actually a short - only 16 bits are used
     * 
*/ public boolean hasPerm() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 perm = 1; * *
      * Actually a short - only 16 bits are used
     * 
*/ public int getPerm() { return perm_; } private void initFields() { perm_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPerm()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, perm_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, perm_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) obj; boolean result = true; result = result && (hasPerm() == other.hasPerm()); if (hasPerm()) { result = result && (getPerm() == other.getPerm()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPerm()) { hash = (37 * hash) + PERM_FIELD_NUMBER; hash = (53 * hash) + getPerm(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
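      // Editorial note: a build-and-round-trip sketch for FsPermissionProto,
      // not part of the generated file. perm carries POSIX-style mode bits
      // (only the low 16 bits are meaningful); 0644 is a sample value, and
      // parseFrom may throw InvalidProtocolBufferException.
      //
      //   HdfsProtos.FsPermissionProto perm =
      //       HdfsProtos.FsPermissionProto.newBuilder()
      //           .setPerm(0644)                  // required; octal rw-r--r--
      //           .build();
      //   byte[] wire = perm.toByteArray();       // serialize
      //   HdfsProtos.FsPermissionProto back =
      //       HdfsProtos.FsPermissionProto.parseFrom(wire);  // one of the overloads above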
PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FsPermissionProto} * *
     **
      * File or directory permission - same spec as POSIX
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); perm_ = 0; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.perm_ = perm_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) return this; if (other.hasPerm()) { setPerm(other.getPerm()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPerm()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parsedMessage = null; try { 
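        // Editorial note: a sketch of the stream-based merge path, not part of
        // the generated file. Builder.mergeFrom(CodedInputStream, ...) defined
        // below delegates to PARSER.parsePartialFrom; 'wire' is the
        // hypothetical byte[] from the round-trip sketch above, and the call
        // may throw IOException.
        //
        //   HdfsProtos.FsPermissionProto.Builder b =
        //       HdfsProtos.FsPermissionProto.newBuilder();
        //   b.mergeFrom(com.google.protobuf.CodedInputStream.newInstance(wire),
        //       com.google.protobuf.ExtensionRegistryLite.getEmptyRegistry());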
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 perm = 1; private int perm_ ; /** * required uint32 perm = 1; * *
        * Actually a short - only 16 bits are used
       * 
*/ public boolean hasPerm() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 perm = 1; * *
        * Actually a short - only 16 bits are used
       * 
*/ public int getPerm() { return perm_; } /** * required uint32 perm = 1; * *
        * Actually a short - only 16 bits are used
       * 
*/ public Builder setPerm(int value) { bitField0_ |= 0x00000001; perm_ = value; onChanged(); return this; } /** * required uint32 perm = 1; * *
        * Actually a short - only 16 bits are used
       * 
*/ public Builder clearPerm() { bitField0_ = (bitField0_ & ~0x00000001); perm_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsPermissionProto) } static { defaultInstance = new FsPermissionProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsPermissionProto) } public interface StorageTypesProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); } /** * Protobuf type {@code hadoop.hdfs.StorageTypesProto} * *
   **
   * A list of storage types. 
   * 
*/ public static final class StorageTypesProto extends com.google.protobuf.GeneratedMessage implements StorageTypesProtoOrBuilder { // Use StorageTypesProto.newBuilder() to construct. private StorageTypesProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StorageTypesProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StorageTypesProto defaultInstance; public static StorageTypesProto getDefaultInstance() { return defaultInstance; } public StorageTypesProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageTypesProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } storageTypes_.add(value); } break; } case 10: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } storageTypes_.add(value); } } input.popLimit(oldLimit); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class); } public 
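      // Editorial note: a usage sketch for StorageTypesProto, not part of the
      // generated file. storageTypes is a repeated enum with no required
      // fields, so an empty message is also valid.
      //
      //   HdfsProtos.StorageTypesProto types =
      //       HdfsProtos.StorageTypesProto.newBuilder()
      //           .addStorageTypes(HdfsProtos.StorageTypeProto.DISK)
      //           .addStorageTypes(HdfsProtos.StorageTypeProto.ARCHIVE)
      //           .build();   // build() always succeeds here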
static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StorageTypesProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StorageTypesProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; public static final int STORAGETYPES_FIELD_NUMBER = 1; private java.util.List storageTypes_; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public java.util.List getStorageTypesList() { return storageTypes_; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_.get(index); } private void initFields() { storageTypes_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(1, storageTypes_.get(i).getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i).getNumber()); } size += dataSize; size += 1 * storageTypes_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) obj; boolean result = true; result = result && getStorageTypesList() .equals(other.getStorageTypesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getStorageTypesList()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageTypesProto} * *
     **
     * A list of storage types. 
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) == 0x00000001)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000001); } result.storageTypes_ = storageTypes_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) return this; if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000001; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public java.util.List getStorageTypesList() { return java.util.Collections.unmodifiableList(storageTypes_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_.get(index); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); super.addAll(values, storageTypes_); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1; */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypesProto) } static { defaultInstance = new StorageTypesProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypesProto) } public interface BlockStoragePolicyProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 policyId = 1; /** * required uint32 policyId = 1; */ boolean hasPolicyId(); /** * required uint32 policyId = 1; */ int getPolicyId(); // required string name = 2; /** * required string name = 2; */ boolean hasName(); /** * required string name = 2; */ java.lang.String getName(); /** * required string name = 2; */ com.google.protobuf.ByteString getNameBytes(); // required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
     * A list of storage types for storing the block replicas when creating a
     * block.
     * 
*/ boolean hasCreationPolicy(); /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
     * A list of storage types for storing the block replicas when creating a
     * block.
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy(); /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
     * A list of storage types for storing the block replicas when creating a
     * block.
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder(); // optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
     * A list of storage types for creation fallback storage.
     * 
*/ boolean hasCreationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
     * A list of storage types for creation fallback storage.
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
     * A list of storage types for creation fallback storage.
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder(); // optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ boolean hasReplicationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy(); /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto} * *
   **
   * Block replica storage policy.
   * 
*/ public static final class BlockStoragePolicyProto extends com.google.protobuf.GeneratedMessage implements BlockStoragePolicyProtoOrBuilder { // Use BlockStoragePolicyProto.newBuilder() to construct. private BlockStoragePolicyProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private BlockStoragePolicyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final BlockStoragePolicyProto defaultInstance; public static BlockStoragePolicyProto getDefaultInstance() { return defaultInstance; } public BlockStoragePolicyProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockStoragePolicyProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; policyId_ = input.readUInt32(); break; } case 18: { bitField0_ |= 0x00000002; name_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = creationPolicy_.toBuilder(); } creationPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(creationPolicy_); creationPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = creationFallbackPolicy_.toBuilder(); } creationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(creationFallbackPolicy_); creationFallbackPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) == 0x00000010)) { subBuilder = replicationFallbackPolicy_.toBuilder(); } replicationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(replicationFallbackPolicy_); replicationFallbackPolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return 
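      // Editorial note: a usage sketch for BlockStoragePolicyProto, not part
      // of the generated file. policyId, name, and creationPolicy are
      // required; the fallback policies are optional. The id 7 and the name
      // "HOT" are hypothetical sample values, and setCreationPolicy /
      // setReplicationFallbackPolicy are the standard generated setters for
      // the message-typed fields (defined in this class's Builder, further
      // below in the file).
      //
      //   HdfsProtos.BlockStoragePolicyProto policy =
      //       HdfsProtos.BlockStoragePolicyProto.newBuilder()
      //           .setPolicyId(7)
      //           .setName("HOT")
      //           .setCreationPolicy(HdfsProtos.StorageTypesProto.newBuilder()
      //               .addStorageTypes(HdfsProtos.StorageTypeProto.DISK).build())
      //           .setReplicationFallbackPolicy(HdfsProtos.StorageTypesProto.newBuilder()
      //               .addStorageTypes(HdfsProtos.StorageTypeProto.ARCHIVE).build())
      //           .build();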
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public BlockStoragePolicyProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new BlockStoragePolicyProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint32 policyId = 1; public static final int POLICYID_FIELD_NUMBER = 1; private int policyId_; /** * required uint32 policyId = 1; */ public boolean hasPolicyId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 policyId = 1; */ public int getPolicyId() { return policyId_; } // required string name = 2; public static final int NAME_FIELD_NUMBER = 2; private java.lang.Object name_; /** * required string name = 2; */ public boolean hasName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string name = 2; */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * required string name = 2; */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; public static final int CREATIONPOLICY_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_; /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
*/ public boolean hasCreationPolicy() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() { return creationPolicy_; } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
     * a list of storage types for storing the block replicas when creating a
     * block.
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() { return creationPolicy_; } // optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; public static final int CREATIONFALLBACKPOLICY_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_; /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
     * A list of storage types for creation fallback storage.
     * 
*/ public boolean hasCreationFallbackPolicy() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
     * A list of storage types for creation fallback storage.
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() { return creationFallbackPolicy_; } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
     * A list of storage types for creation fallback storage.
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() { return creationFallbackPolicy_; } // optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; public static final int REPLICATIONFALLBACKPOLICY_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public boolean hasReplicationFallbackPolicy() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() { return replicationFallbackPolicy_; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() { return replicationFallbackPolicy_; } private void initFields() { policyId_ = 0; name_ = ""; creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPolicyId()) { memoizedIsInitialized = 0; return false; } if (!hasName()) { memoizedIsInitialized = 0; return false; } if (!hasCreationPolicy()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, policyId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, creationPolicy_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, creationFallbackPolicy_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeMessage(5, replicationFallbackPolicy_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, policyId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, creationPolicy_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, creationFallbackPolicy_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, replicationFallbackPolicy_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws 
java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) obj; boolean result = true; result = result && (hasPolicyId() == other.hasPolicyId()); if (hasPolicyId()) { result = result && (getPolicyId() == other.getPolicyId()); } result = result && (hasName() == other.hasName()); if (hasName()) { result = result && getName() .equals(other.getName()); } result = result && (hasCreationPolicy() == other.hasCreationPolicy()); if (hasCreationPolicy()) { result = result && getCreationPolicy() .equals(other.getCreationPolicy()); } result = result && (hasCreationFallbackPolicy() == other.hasCreationFallbackPolicy()); if (hasCreationFallbackPolicy()) { result = result && getCreationFallbackPolicy() .equals(other.getCreationFallbackPolicy()); } result = result && (hasReplicationFallbackPolicy() == other.hasReplicationFallbackPolicy()); if (hasReplicationFallbackPolicy()) { result = result && getReplicationFallbackPolicy() .equals(other.getReplicationFallbackPolicy()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPolicyId()) { hash = (37 * hash) + POLICYID_FIELD_NUMBER; hash = (53 * hash) + getPolicyId(); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasCreationPolicy()) { hash = (37 * hash) + CREATIONPOLICY_FIELD_NUMBER; hash = (53 * hash) + getCreationPolicy().hashCode(); } if (hasCreationFallbackPolicy()) { hash = (37 * hash) + CREATIONFALLBACKPOLICY_FIELD_NUMBER; hash = (53 * hash) + getCreationFallbackPolicy().hashCode(); } if (hasReplicationFallbackPolicy()) { hash = (37 * hash) + REPLICATIONFALLBACKPOLICY_FIELD_NUMBER; hash = (53 * hash) + getReplicationFallbackPolicy().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(java.io.InputStream 
input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto} * *
     **
     * Block replica storage policy.
     * 
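      * A minimal construction sketch (field values are hypothetical; every
      * method used here is generated in this file, except toByteArray, which
      * comes from the protobuf runtime):
      *
      *   BlockStoragePolicyProto policy = BlockStoragePolicyProto.newBuilder()
      *       .setPolicyId(12)                                            // required
      *       .setName("HOT")                                             // required
      *       .setCreationPolicy(StorageTypesProto.getDefaultInstance())  // required
      *       .build();   // build() throws if any required field is unset
      *
      *   // Round trip through the wire format via the generated parser.
      *   BlockStoragePolicyProto copy =
      *       BlockStoragePolicyProto.parseFrom(policy.toByteArray());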
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getCreationPolicyFieldBuilder(); getCreationFallbackPolicyFieldBuilder(); getReplicationFallbackPolicyFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); policyId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); name_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (creationPolicyBuilder_ == null) { creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } else { creationPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } else { creationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); } else { replicationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.policyId_ = policyId_; if (((from_bitField0_ & 0x00000002) 
== 0x00000002)) { to_bitField0_ |= 0x00000002; } result.name_ = name_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (creationPolicyBuilder_ == null) { result.creationPolicy_ = creationPolicy_; } else { result.creationPolicy_ = creationPolicyBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (creationFallbackPolicyBuilder_ == null) { result.creationFallbackPolicy_ = creationFallbackPolicy_; } else { result.creationFallbackPolicy_ = creationFallbackPolicyBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } if (replicationFallbackPolicyBuilder_ == null) { result.replicationFallbackPolicy_ = replicationFallbackPolicy_; } else { result.replicationFallbackPolicy_ = replicationFallbackPolicyBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) return this; if (other.hasPolicyId()) { setPolicyId(other.getPolicyId()); } if (other.hasName()) { bitField0_ |= 0x00000002; name_ = other.name_; onChanged(); } if (other.hasCreationPolicy()) { mergeCreationPolicy(other.getCreationPolicy()); } if (other.hasCreationFallbackPolicy()) { mergeCreationFallbackPolicy(other.getCreationFallbackPolicy()); } if (other.hasReplicationFallbackPolicy()) { mergeReplicationFallbackPolicy(other.getReplicationFallbackPolicy()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPolicyId()) { return false; } if (!hasName()) { return false; } if (!hasCreationPolicy()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 policyId = 1; private int policyId_ ; /** * required uint32 policyId = 1; */ public boolean hasPolicyId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 policyId = 1; */ public int getPolicyId() { return policyId_; } /** * required uint32 policyId = 1; */ public Builder setPolicyId(int value) { bitField0_ |= 0x00000001; policyId_ = value; onChanged(); return this; } /** * required uint32 policyId = 1; */ public Builder clearPolicyId() { bitField0_ = (bitField0_ & ~0x00000001); policyId_ = 0; onChanged(); return this; } // required string name = 2; private java.lang.Object name_ = ""; /** * required string name = 2; */ public boolean hasName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required 
string name = 2; */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); name_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string name = 2; */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string name = 2; */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; name_ = value; onChanged(); return this; } /** * required string name = 2; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000002); name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * required string name = 2; */ public Builder setNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; name_ = value; onChanged(); return this; } // required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationPolicyBuilder_; /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public boolean hasCreationPolicy() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() { if (creationPolicyBuilder_ == null) { return creationPolicy_; } else { return creationPolicyBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public Builder setCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } creationPolicy_ = value; onChanged(); } else { creationPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public Builder setCreationPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (creationPolicyBuilder_ == null) { creationPolicy_ = builderForValue.build(); onChanged(); } else { creationPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
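        * Merge note: if creationPolicy is already set and is not the default
        * instance, the incoming value is merged into it field by field;
        * otherwise the incoming value replaces it outright (see the body of
        * mergeCreationPolicy below).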
*/ public Builder mergeCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationPolicyBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && creationPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationPolicy_).mergeFrom(value).buildPartial(); } else { creationPolicy_ = value; } onChanged(); } else { creationPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public Builder clearCreationPolicy() { if (creationPolicyBuilder_ == null) { creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); onChanged(); } else { creationPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationPolicyBuilder() { bitField0_ |= 0x00000004; onChanged(); return getCreationPolicyFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() { if (creationPolicyBuilder_ != null) { return creationPolicyBuilder_.getMessageOrBuilder(); } else { return creationPolicy_; } } /** * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3; * *
       * a list of storage types for storing the block replicas when creating a
       * block.
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getCreationPolicyFieldBuilder() { if (creationPolicyBuilder_ == null) { creationPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( creationPolicy_, getParentForChildren(), isClean()); creationPolicy_ = null; } return creationPolicyBuilder_; } // optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationFallbackPolicyBuilder_; /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public boolean hasCreationFallbackPolicy() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() { if (creationFallbackPolicyBuilder_ == null) { return creationFallbackPolicy_; } else { return creationFallbackPolicyBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public Builder setCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationFallbackPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } creationFallbackPolicy_ = value; onChanged(); } else { creationFallbackPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public Builder setCreationFallbackPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ = builderForValue.build(); onChanged(); } else { creationFallbackPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public Builder mergeCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (creationFallbackPolicyBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && creationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationFallbackPolicy_).mergeFrom(value).buildPartial(); } else { creationFallbackPolicy_ = value; } onChanged(); } else { creationFallbackPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public Builder clearCreationFallbackPolicy() { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); onChanged(); } else { creationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationFallbackPolicyBuilder() { bitField0_ |= 0x00000008; onChanged(); return getCreationFallbackPolicyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() { if (creationFallbackPolicyBuilder_ != null) { return creationFallbackPolicyBuilder_.getMessageOrBuilder(); } else { return creationFallbackPolicy_; } } /** * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4; * *
       * A list of storage types for creation fallback storage.
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getCreationFallbackPolicyFieldBuilder() { if (creationFallbackPolicyBuilder_ == null) { creationFallbackPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( creationFallbackPolicy_, getParentForChildren(), isClean()); creationFallbackPolicy_ = null; } return creationFallbackPolicyBuilder_; } // optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> replicationFallbackPolicyBuilder_; /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public boolean hasReplicationFallbackPolicy() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() { if (replicationFallbackPolicyBuilder_ == null) { return replicationFallbackPolicy_; } else { return replicationFallbackPolicyBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder setReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (replicationFallbackPolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } replicationFallbackPolicy_ = value; onChanged(); } else { replicationFallbackPolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder setReplicationFallbackPolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = builderForValue.build(); onChanged(); } else { replicationFallbackPolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder mergeReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) { if (replicationFallbackPolicyBuilder_ == null) { if (((bitField0_ & 0x00000010) == 0x00000010) && replicationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) { replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(replicationFallbackPolicy_).mergeFrom(value).buildPartial(); } else { replicationFallbackPolicy_ = value; } onChanged(); } else { replicationFallbackPolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; 
} /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public Builder clearReplicationFallbackPolicy() { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance(); onChanged(); } else { replicationFallbackPolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getReplicationFallbackPolicyBuilder() { bitField0_ |= 0x00000010; onChanged(); return getReplicationFallbackPolicyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() { if (replicationFallbackPolicyBuilder_ != null) { return replicationFallbackPolicyBuilder_.getMessageOrBuilder(); } else { return replicationFallbackPolicy_; } } /** * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> getReplicationFallbackPolicyFieldBuilder() { if (replicationFallbackPolicyBuilder_ == null) { replicationFallbackPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>( replicationFallbackPolicy_, getParentForChildren(), isClean()); replicationFallbackPolicy_ = null; } return replicationFallbackPolicyBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockStoragePolicyProto) } static { defaultInstance = new BlockStoragePolicyProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockStoragePolicyProto) } public interface StorageUuidsProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated string storageUuids = 1; /** * repeated string storageUuids = 1; */ java.util.List getStorageUuidsList(); /** * repeated string storageUuids = 1; */ int getStorageUuidsCount(); /** * repeated string storageUuids = 1; */ java.lang.String getStorageUuids(int index); /** * repeated string storageUuids = 1; */ com.google.protobuf.ByteString getStorageUuidsBytes(int index); } /** * Protobuf type {@code hadoop.hdfs.StorageUuidsProto} * *
   **
   * A list of storage IDs. 
   * 
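    * Usage sketch for the repeated field (the UUID strings below are
    * hypothetical; the accessors are the ones generated in this class):
    *
    *   StorageUuidsProto uuids = StorageUuidsProto.newBuilder()
    *       .addStorageUuids("storage-uuid-1")
    *       .addStorageUuids("storage-uuid-2")
    *       .build();                              // no required fields to miss
    *   int count = uuids.getStorageUuidsCount();  // 2
    *   String first = uuids.getStorageUuids(0);   // "storage-uuid-1"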
*/ public static final class StorageUuidsProto extends com.google.protobuf.GeneratedMessage implements StorageUuidsProtoOrBuilder { // Use StorageUuidsProto.newBuilder() to construct. private StorageUuidsProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StorageUuidsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StorageUuidsProto defaultInstance; public static StorageUuidsProto getDefaultInstance() { return defaultInstance; } public StorageUuidsProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageUuidsProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { storageUuids_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000001; } storageUuids_.add(input.readBytes()); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { storageUuids_ = new com.google.protobuf.UnmodifiableLazyStringList(storageUuids_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StorageUuidsProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StorageUuidsProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated string storageUuids = 1; public static final int STORAGEUUIDS_FIELD_NUMBER = 1; private com.google.protobuf.LazyStringList storageUuids_; /** * repeated string storageUuids = 1; */ public java.util.List getStorageUuidsList() { return storageUuids_; } /** * repeated string storageUuids = 1; */ public int getStorageUuidsCount() { return storageUuids_.size(); } 
/** * repeated string storageUuids = 1; */ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 1; */ public com.google.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } private void initFields() { storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < storageUuids_.size(); i++) { output.writeBytes(1, storageUuids_.getByteString(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < storageUuids_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(storageUuids_.getByteString(i)); } size += dataSize; size += 1 * getStorageUuidsList().size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) obj; boolean result = true; result = result && getStorageUuidsList() .equals(other.getStorageUuidsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getStorageUuidsCount() > 0) { hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageUuidsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageUuidsProto} * *
     **
     * A list of storage IDs. 
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) == 0x00000001)) { storageUuids_ = new com.google.protobuf.UnmodifiableLazyStringList( storageUuids_); bitField0_ = (bitField0_ & ~0x00000001); } result.storageUuids_ = storageUuids_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance()) return this; if (!other.storageUuids_.isEmpty()) { if (storageUuids_.isEmpty()) { storageUuids_ = other.storageUuids_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureStorageUuidsIsMutable(); storageUuids_.addAll(other.storageUuids_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } 
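      // Note on merge semantics (mergeFrom above): values of the repeated
      // storageUuids field are appended, not replaced. A hedged sketch with
      // two hypothetical messages a and b:
      //
      //   StorageUuidsProto merged =
      //       StorageUuidsProto.newBuilder(a).mergeFrom(b).build();
      //   // merged.getStorageUuidsCount() == a.getStorageUuidsCount()
      //   //                                + b.getStorageUuidsCount()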
public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated string storageUuids = 1; private com.google.protobuf.LazyStringList storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageUuidsIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { storageUuids_ = new com.google.protobuf.LazyStringArrayList(storageUuids_); bitField0_ |= 0x00000001; } } /** * repeated string storageUuids = 1; */ public java.util.List getStorageUuidsList() { return java.util.Collections.unmodifiableList(storageUuids_); } /** * repeated string storageUuids = 1; */ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 1; */ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 1; */ public com.google.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } /** * repeated string storageUuids = 1; */ public Builder setStorageUuids( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.set(index, value); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder addStorageUuids( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder addAllStorageUuids( java.lang.Iterable values) { ensureStorageUuidsIsMutable(); super.addAll(values, storageUuids_); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder clearStorageUuids() { storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string storageUuids = 1; */ public Builder addStorageUuidsBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageUuidsProto) } static { defaultInstance = new StorageUuidsProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageUuidsProto) } public interface LocatedBlockProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.ExtendedBlockProto b = 1; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ boolean hasB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder(); // required uint64 offset = 2; /** * required uint64 offset = 2; * *
     * offset of first byte of block in the file
     * 
*/ boolean hasOffset(); /** * required uint64 offset = 2; * *
     * offset of first byte of block in the file
     * 
*/ long getOffset(); // repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
      * Locations ordered by proximity to client IP
     * 
*/ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
      * Locations ordered by proximity to client IP
     * 
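      * Iteration sketch (assumes a populated instance lb; nearest replicas
      * come first, per the ordering noted above):
      *
      *   for (int i = 0; i < lb.getLocsCount(); i++) {
      *     DatanodeInfoProto dn = lb.getLocs(i);
      *   }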
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
      * Locations ordered by proximity to client IP
     * 
*/ int getLocsCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
      * Locations ordered by proximity to client IP
     * 
*/ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
      * Locations ordered by proximity to client IP
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index); // required bool corrupt = 4; /** * required bool corrupt = 4; * *
     * true if all replicas of a block are corrupt, else false
     * 
*/ boolean hasCorrupt(); /** * required bool corrupt = 4; * *
     * true if all replicas of a block are corrupt, else false
     * 
*/ boolean getCorrupt(); // required .hadoop.common.TokenProto blockToken = 5; /** * required .hadoop.common.TokenProto blockToken = 5; */ boolean hasBlockToken(); /** * required .hadoop.common.TokenProto blockToken = 5; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken(); /** * required .hadoop.common.TokenProto blockToken = 5; */ org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder(); // repeated bool isCached = 6 [packed = true]; /** * repeated bool isCached = 6 [packed = true]; * *
      * whether the corresponding location in locs is cached
     * 
*/ java.util.List<java.lang.Boolean> getIsCachedList(); /** * repeated bool isCached = 6 [packed = true]; * *
      * whether the corresponding location in locs is cached
     * 
*/ int getIsCachedCount(); /** * repeated bool isCached = 6 [packed = true]; * *
      * whether the corresponding location in locs is cached
     * 
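      * The list runs parallel to locs, one flag per location. A hedged
      * sketch, assuming a populated instance lb:
      *
      *   boolean firstReplicaCached =
      *       lb.getIsCachedCount() > 0 && lb.getIsCached(0);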
*/ boolean getIsCached(int index); // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); // repeated string storageIDs = 8; /** * repeated string storageIDs = 8; */ java.util.List<java.lang.String> getStorageIDsList(); /** * repeated string storageIDs = 8; */ int getStorageIDsCount(); /** * repeated string storageIDs = 8; */ java.lang.String getStorageIDs(int index); /** * repeated string storageIDs = 8; */ com.google.protobuf.ByteString getStorageIDsBytes(int index); } /** * Protobuf type {@code hadoop.hdfs.LocatedBlockProto} * *
   **
   * A LocatedBlock gives information about a block and its location.
   * 
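    * Field-reading sketch, assuming a populated instance lb (every accessor
    * below is declared in this message):
    *
    *   long offset = lb.getOffset();        // first byte of the block within the file
    *   boolean corrupt = lb.getCorrupt();   // true only if all replicas are corrupt
    *   int replicas = lb.getLocsCount();    // locations, nearest first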
*/ public static final class LocatedBlockProto extends com.google.protobuf.GeneratedMessage implements LocatedBlockProtoOrBuilder { // Use LocatedBlockProto.newBuilder() to construct. private LocatedBlockProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private LocatedBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final LocatedBlockProto defaultInstance; public static LocatedBlockProto getDefaultInstance() { return defaultInstance; } public LocatedBlockProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private LocatedBlockProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = b_.toBuilder(); } b_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(b_); b_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; offset_ = input.readUInt64(); break; } case 26: { if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { locs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } locs_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } case 32: { bitField0_ |= 0x00000004; corrupt_ = input.readBool(); break; } case 42: { org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = blockToken_.toBuilder(); } blockToken_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(blockToken_); blockToken_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 48: { if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { isCached_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000020; } isCached_.add(input.readBool()); break; } case 50: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); if (!((mutable_bitField0_ & 0x00000020) == 0x00000020) && input.getBytesUntilLimit() > 0) { isCached_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000020; } while (input.getBytesUntilLimit() > 0) { isCached_.add(input.readBool()); } input.popLimit(limit); break; } case 56: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if 
(!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } storageTypes_.add(value); } break; } case 58: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } storageTypes_.add(value); } } input.popLimit(oldLimit); break; } case 66: { if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { storageIDs_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000080; } storageIDs_.add(input.readBytes()); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { locs_ = java.util.Collections.unmodifiableList(locs_); } if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { isCached_ = java.util.Collections.unmodifiableList(isCached_); } if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); } if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { storageIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(storageIDs_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public LocatedBlockProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new LocatedBlockProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.ExtendedBlockProto b = 1; public static final int B_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public boolean hasB() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { return b_; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { return b_; } // required 
uint64 offset = 2; public static final int OFFSET_FIELD_NUMBER = 2; private long offset_; /** * required uint64 offset = 2; * *
     * offset of first byte of block in the file
     * 
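     *
     * For example, with the common 128 MB block size (134217728 bytes, an
     * assumption here; the real value is per-file), the third block of a
     * file would report offset 2 * 134217728 = 268435456.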
*/ public boolean hasOffset() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 offset = 2; * *
     * offset of first byte of block in the file
     * 
*/ public long getOffset() { return offset_; } // repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; public static final int LOCS_FIELD_NUMBER = 3; private java.util.List locs_; /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
     * Locations ordered by proximity to client ip
     * 
*/ public java.util.List getLocsList() { return locs_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
     * Locations ordered by proximity to client ip
     * 
*/ public java.util.List getLocsOrBuilderList() { return locs_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
     * Locations ordered by proximity to client ip
     * 
*/ public int getLocsCount() { return locs_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
     * Locations ordered by proximity to client ip
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { return locs_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
     * Locations ordered by proximity to client ip
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index) { return locs_.get(index); } // required bool corrupt = 4; public static final int CORRUPT_FIELD_NUMBER = 4; private boolean corrupt_; /** * required bool corrupt = 4; * *
     * true if all replicas of a block are corrupt, else false
     * 
*/ public boolean hasCorrupt() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool corrupt = 4; * *
     * true if all replicas of a block are corrupt, else false
     * 
*/ public boolean getCorrupt() { return corrupt_; } // required .hadoop.common.TokenProto blockToken = 5; public static final int BLOCKTOKEN_FIELD_NUMBER = 5; private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_; /** * required .hadoop.common.TokenProto blockToken = 5; */ public boolean hasBlockToken() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() { return blockToken_; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() { return blockToken_; } // repeated bool isCached = 6 [packed = true]; public static final int ISCACHED_FIELD_NUMBER = 6; private java.util.List isCached_; /** * repeated bool isCached = 6 [packed = true]; * *
     * true if the corresponding location in locs is cached
     * 
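     *
     * For example, locs = [dn1, dn2] with isCached = [true, false] says the
     * replica on dn1 is cached by its datanode and the one on dn2 is not
     * (dn1/dn2 are placeholder names).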
*/ public java.util.List getIsCachedList() { return isCached_; } /** * repeated bool isCached = 6 [packed = true]; * *
     * true if the corresponding location in locs is cached
     * 
*/ public int getIsCachedCount() { return isCached_.size(); } /** * repeated bool isCached = 6 [packed = true]; * *
     * true if the corresponding location in locs is cached
     * 
*/ public boolean getIsCached(int index) { return isCached_.get(index); } private int isCachedMemoizedSerializedSize = -1; // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; public static final int STORAGETYPES_FIELD_NUMBER = 7; private java.util.List storageTypes_; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public java.util.List getStorageTypesList() { return storageTypes_; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_.get(index); } // repeated string storageIDs = 8; public static final int STORAGEIDS_FIELD_NUMBER = 8; private com.google.protobuf.LazyStringList storageIDs_; /** * repeated string storageIDs = 8; */ public java.util.List getStorageIDsList() { return storageIDs_; } /** * repeated string storageIDs = 8; */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 8; */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 8; */ public com.google.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } private void initFields() { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); offset_ = 0L; locs_ = java.util.Collections.emptyList(); corrupt_ = false; blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance(); isCached_ = java.util.Collections.emptyList(); storageTypes_ = java.util.Collections.emptyList(); storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasB()) { memoizedIsInitialized = 0; return false; } if (!hasOffset()) { memoizedIsInitialized = 0; return false; } if (!hasCorrupt()) { memoizedIsInitialized = 0; return false; } if (!hasBlockToken()) { memoizedIsInitialized = 0; return false; } if (!getB().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getLocsCount(); i++) { if (!getLocs(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (!getBlockToken().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, b_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, offset_); } for (int i = 0; i < locs_.size(); i++) { output.writeMessage(3, locs_.get(i)); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(4, corrupt_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(5, blockToken_); } if (getIsCachedList().size() > 0) { output.writeRawVarint32(50); output.writeRawVarint32(isCachedMemoizedSerializedSize); } for (int i = 0; i < isCached_.size(); i++) { output.writeBoolNoTag(isCached_.get(i)); } for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(7, storageTypes_.get(i).getNumber()); } for (int i = 0; i < storageIDs_.size(); i++) { output.writeBytes(8, storageIDs_.getByteString(i)); } 
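// Note on the packed `isCached` field written above: [packed = true] emits one
// length-delimited record rather than a tag per element. The raw key 50 is
// (field_number << 3) | wire_type = (6 << 3) | 2; it is followed by a varint
// byte count and then the bool bytes themselves. For isCached = [true, false,
// true] the wire bytes are 0x32 0x03 0x01 0x00 0x01 (5 bytes), versus
// 0x30 0x01 0x30 0x00 0x30 0x01 (6 bytes) for the unpacked form.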
getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, b_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, offset_); } for (int i = 0; i < locs_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, locs_.get(i)); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(4, corrupt_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, blockToken_); } { int dataSize = 0; dataSize = 1 * getIsCachedList().size(); size += dataSize; if (!getIsCachedList().isEmpty()) { size += 1; size += com.google.protobuf.CodedOutputStream .computeInt32SizeNoTag(dataSize); } isCachedMemoizedSerializedSize = dataSize; } { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i).getNumber()); } size += dataSize; size += 1 * storageTypes_.size(); } { int dataSize = 0; for (int i = 0; i < storageIDs_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(storageIDs_.getByteString(i)); } size += dataSize; size += 1 * getStorageIDsList().size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj; boolean result = true; result = result && (hasB() == other.hasB()); if (hasB()) { result = result && getB() .equals(other.getB()); } result = result && (hasOffset() == other.hasOffset()); if (hasOffset()) { result = result && (getOffset() == other.getOffset()); } result = result && getLocsList() .equals(other.getLocsList()); result = result && (hasCorrupt() == other.hasCorrupt()); if (hasCorrupt()) { result = result && (getCorrupt() == other.getCorrupt()); } result = result && (hasBlockToken() == other.hasBlockToken()); if (hasBlockToken()) { result = result && getBlockToken() .equals(other.getBlockToken()); } result = result && getIsCachedList() .equals(other.getIsCachedList()); result = result && getStorageTypesList() .equals(other.getStorageTypesList()); result = result && getStorageIDsList() .equals(other.getStorageIDsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasB()) { hash = (37 * hash) + B_FIELD_NUMBER; hash = (53 * hash) + getB().hashCode(); } if (hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + hashLong(getOffset()); } if (getLocsCount() > 
0) { hash = (37 * hash) + LOCS_FIELD_NUMBER; hash = (53 * hash) + getLocsList().hashCode(); } if (hasCorrupt()) { hash = (37 * hash) + CORRUPT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getCorrupt()); } if (hasBlockToken()) { hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER; hash = (53 * hash) + getBlockToken().hashCode(); } if (getIsCachedCount() > 0) { hash = (37 * hash) + ISCACHED_FIELD_NUMBER; hash = (53 * hash) + getIsCachedList().hashCode(); } if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getStorageTypesList()); } if (getStorageIDsCount() > 0) { hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageIDsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); 
} @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.LocatedBlockProto} * *
     **
     * A LocatedBlock gives information about a block and its location.
     * 
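      *
      * Builders are mutable; toBuilder() on an existing message returns one
      * seeded with its fields, so a single-field update is (sketch):
      *
      *   LocatedBlockProto updated = lb.toBuilder().setCorrupt(true).build();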
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBFieldBuilder(); getLocsFieldBuilder(); getBlockTokenFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (bBuilder_ == null) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { bBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); offset_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); if (locsBuilder_ == null) { locs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); } else { locsBuilder_.clear(); } corrupt_ = false; bitField0_ = (bitField0_ & ~0x00000008); if (blockTokenBuilder_ == null) { blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance(); } else { blockTokenBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); isCached_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (bBuilder_ == null) { result.b_ = b_; } else { result.b_ = bBuilder_.build(); } if (((from_bitField0_ & 
0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.offset_ = offset_; if (locsBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004)) { locs_ = java.util.Collections.unmodifiableList(locs_); bitField0_ = (bitField0_ & ~0x00000004); } result.locs_ = locs_; } else { result.locs_ = locsBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000004; } result.corrupt_ = corrupt_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000008; } if (blockTokenBuilder_ == null) { result.blockToken_ = blockToken_; } else { result.blockToken_ = blockTokenBuilder_.build(); } if (((bitField0_ & 0x00000020) == 0x00000020)) { isCached_ = java.util.Collections.unmodifiableList(isCached_); bitField0_ = (bitField0_ & ~0x00000020); } result.isCached_ = isCached_; if (((bitField0_ & 0x00000040) == 0x00000040)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000040); } result.storageTypes_ = storageTypes_; if (((bitField0_ & 0x00000080) == 0x00000080)) { storageIDs_ = new com.google.protobuf.UnmodifiableLazyStringList( storageIDs_); bitField0_ = (bitField0_ & ~0x00000080); } result.storageIDs_ = storageIDs_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this; if (other.hasB()) { mergeB(other.getB()); } if (other.hasOffset()) { setOffset(other.getOffset()); } if (locsBuilder_ == null) { if (!other.locs_.isEmpty()) { if (locs_.isEmpty()) { locs_ = other.locs_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureLocsIsMutable(); locs_.addAll(other.locs_); } onChanged(); } } else { if (!other.locs_.isEmpty()) { if (locsBuilder_.isEmpty()) { locsBuilder_.dispose(); locsBuilder_ = null; locs_ = other.locs_; bitField0_ = (bitField0_ & ~0x00000004); locsBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getLocsFieldBuilder() : null; } else { locsBuilder_.addAllMessages(other.locs_); } } } if (other.hasCorrupt()) { setCorrupt(other.getCorrupt()); } if (other.hasBlockToken()) { mergeBlockToken(other.getBlockToken()); } if (!other.isCached_.isEmpty()) { if (isCached_.isEmpty()) { isCached_ = other.isCached_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureIsCachedIsMutable(); isCached_.addAll(other.isCached_); } onChanged(); } if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } if (!other.storageIDs_.isEmpty()) { if (storageIDs_.isEmpty()) { storageIDs_ = other.storageIDs_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureStorageIDsIsMutable(); storageIDs_.addAll(other.storageIDs_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasB()) { return false; } if (!hasOffset()) { return false; } if (!hasCorrupt()) { return false; } if (!hasBlockToken()) { return false; } if (!getB().isInitialized()) { return false; } for (int i = 0; i < getLocsCount(); i++) { if (!getLocs(i).isInitialized()) { return false; } } if (!getBlockToken().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.ExtendedBlockProto b = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public boolean hasB() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { if (bBuilder_ == null) { return b_; } else { return bBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (value == null) { throw new NullPointerException(); } b_ = value; onChanged(); } else { bBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (bBuilder_ == null) { b_ = builderForValue.build(); onChanged(); } else { bBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required 
.hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial(); } else { b_ = value; } onChanged(); } else { bBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder clearB() { if (bBuilder_ == null) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { bBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { if (bBuilder_ != null) { return bBuilder_.getMessageOrBuilder(); } else { return b_; } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getBFieldBuilder() { if (bBuilder_ == null) { bBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( b_, getParentForChildren(), isClean()); b_ = null; } return bBuilder_; } // required uint64 offset = 2; private long offset_ ; /** * required uint64 offset = 2; * *
       * offset of first byte of block in the file
       * 
*/ public boolean hasOffset() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 offset = 2; * *
       * offset of first byte of block in the file
       * 
*/ public long getOffset() { return offset_; } /** * required uint64 offset = 2; * *
       * offset of first byte of block in the file
       * 
*/ public Builder setOffset(long value) { bitField0_ |= 0x00000002; offset_ = value; onChanged(); return this; } /** * required uint64 offset = 2; * *
       * offset of first byte of block in the file
       * 
*/ public Builder clearOffset() { bitField0_ = (bitField0_ & ~0x00000002); offset_ = 0L; onChanged(); return this; } // repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; private java.util.List locs_ = java.util.Collections.emptyList(); private void ensureLocsIsMutable() { if (!((bitField0_ & 0x00000004) == 0x00000004)) { locs_ = new java.util.ArrayList(locs_); bitField0_ |= 0x00000004; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public java.util.List getLocsList() { if (locsBuilder_ == null) { return java.util.Collections.unmodifiableList(locs_); } else { return locsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public int getLocsCount() { if (locsBuilder_ == null) { return locs_.size(); } else { return locsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { if (locsBuilder_ == null) { return locs_.get(index); } else { return locsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder setLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.set(index, value); onChanged(); } else { locsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder setLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.set(index, builderForValue.build()); onChanged(); } else { locsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.add(value); onChanged(); } else { locsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder addLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (locsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocsIsMutable(); locs_.add(index, value); onChanged(); } else { locsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder addLocs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(builderForValue.build()); onChanged(); } else { locsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder addLocs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.add(index, builderForValue.build()); onChanged(); } else { locsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder addAllLocs( java.lang.Iterable values) { if (locsBuilder_ == null) { ensureLocsIsMutable(); super.addAll(values, locs_); onChanged(); } else { locsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder clearLocs() { if (locsBuilder_ == null) { locs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { locsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public Builder removeLocs(int index) { if (locsBuilder_ == null) { ensureLocsIsMutable(); locs_.remove(index); onChanged(); } else { locsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder( int index) { return getLocsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( int index) { if (locsBuilder_ == null) { return locs_.get(index); } else { return locsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public java.util.List getLocsOrBuilderList() { if (locsBuilder_ != null) { return locsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(locs_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() { return getLocsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder( int index) { return getLocsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto locs = 3; * *
       * Locations ordered by proximity to client ip
       * 
*/ public java.util.List getLocsBuilderList() { return getLocsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getLocsFieldBuilder() { if (locsBuilder_ == null) { locsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( locs_, ((bitField0_ & 0x00000004) == 0x00000004), getParentForChildren(), isClean()); locs_ = null; } return locsBuilder_; } // required bool corrupt = 4; private boolean corrupt_ ; /** * required bool corrupt = 4; * *
       * true if all replicas of a block are corrupt, else false
       * 
*/ public boolean hasCorrupt() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bool corrupt = 4; * *
       * true if all replicas of a block are corrupt, else false
       * 
*/ public boolean getCorrupt() { return corrupt_; } /** * required bool corrupt = 4; * *
       * true if all replicas of a block are corrupt, else false
       * 
*/ public Builder setCorrupt(boolean value) { bitField0_ |= 0x00000008; corrupt_ = value; onChanged(); return this; } /** * required bool corrupt = 4; * *
       * true if all replicas of a block are corrupt, else false
       * 
*/ public Builder clearCorrupt() { bitField0_ = (bitField0_ & ~0x00000008); corrupt_ = false; onChanged(); return this; } // required .hadoop.common.TokenProto blockToken = 5; private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokenBuilder_; /** * required .hadoop.common.TokenProto blockToken = 5; */ public boolean hasBlockToken() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() { if (blockTokenBuilder_ == null) { return blockToken_; } else { return blockTokenBuilder_.getMessage(); } } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokenBuilder_ == null) { if (value == null) { throw new NullPointerException(); } blockToken_ = value; onChanged(); } else { blockTokenBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder setBlockToken( org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) { if (blockTokenBuilder_ == null) { blockToken_ = builderForValue.build(); onChanged(); } else { blockTokenBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000010; return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder mergeBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) { if (blockTokenBuilder_ == null) { if (((bitField0_ & 0x00000010) == 0x00000010) && blockToken_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) { blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(blockToken_).mergeFrom(value).buildPartial(); } else { blockToken_ = value; } onChanged(); } else { blockTokenBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public Builder clearBlockToken() { if (blockTokenBuilder_ == null) { blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance(); onChanged(); } else { blockTokenBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokenBuilder() { bitField0_ |= 0x00000010; onChanged(); return getBlockTokenFieldBuilder().getBuilder(); } /** * required .hadoop.common.TokenProto blockToken = 5; */ public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() { if (blockTokenBuilder_ != null) { return blockTokenBuilder_.getMessageOrBuilder(); } else { return blockToken_; } } /** * required .hadoop.common.TokenProto blockToken = 5; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> getBlockTokenFieldBuilder() { if 
(blockTokenBuilder_ == null) { blockTokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>( blockToken_, getParentForChildren(), isClean()); blockToken_ = null; } return blockTokenBuilder_; } // repeated bool isCached = 6 [packed = true]; private java.util.List isCached_ = java.util.Collections.emptyList(); private void ensureIsCachedIsMutable() { if (!((bitField0_ & 0x00000020) == 0x00000020)) { isCached_ = new java.util.ArrayList(isCached_); bitField0_ |= 0x00000020; } } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public java.util.List getIsCachedList() { return java.util.Collections.unmodifiableList(isCached_); } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public int getIsCachedCount() { return isCached_.size(); } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public boolean getIsCached(int index) { return isCached_.get(index); } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public Builder setIsCached( int index, boolean value) { ensureIsCachedIsMutable(); isCached_.set(index, value); onChanged(); return this; } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public Builder addIsCached(boolean value) { ensureIsCachedIsMutable(); isCached_.add(value); onChanged(); return this; } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public Builder addAllIsCached( java.lang.Iterable values) { ensureIsCachedIsMutable(); super.addAll(values, isCached_); onChanged(); return this; } /** * repeated bool isCached = 6 [packed = true]; * *
       * true if the corresponding location in locs is cached
       * 
*/ public Builder clearIsCached() { isCached_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000040) == 0x00000040)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000040; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public java.util.List getStorageTypesList() { return java.util.Collections.unmodifiableList(storageTypes_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_.get(index); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); super.addAll(values, storageTypes_); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7; */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } // repeated string storageIDs = 8; private com.google.protobuf.LazyStringList storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageIDsIsMutable() { if (!((bitField0_ & 0x00000080) == 0x00000080)) { storageIDs_ = new com.google.protobuf.LazyStringArrayList(storageIDs_); bitField0_ |= 0x00000080; } } /** * repeated string storageIDs = 8; */ public java.util.List getStorageIDsList() { return java.util.Collections.unmodifiableList(storageIDs_); } /** * repeated string storageIDs = 8; */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 8; */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 8; */ public com.google.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } /** * repeated string storageIDs = 8; */ public Builder setStorageIDs( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.set(index, value); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder addStorageIDs( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder addAllStorageIDs( java.lang.Iterable values) { 
ensureStorageIDsIsMutable(); super.addAll(values, storageIDs_); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder clearStorageIDs() { storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * repeated string storageIDs = 8; */ public Builder addStorageIDsBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlockProto) } static { defaultInstance = new LocatedBlockProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlockProto) } public interface DataEncryptionKeyProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 keyId = 1; /** * required uint32 keyId = 1; */ boolean hasKeyId(); /** * required uint32 keyId = 1; */ int getKeyId(); // required string blockPoolId = 2; /** * required string blockPoolId = 2; */ boolean hasBlockPoolId(); /** * required string blockPoolId = 2; */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 2; */ com.google.protobuf.ByteString getBlockPoolIdBytes(); // required bytes nonce = 3; /** * required bytes nonce = 3; */ boolean hasNonce(); /** * required bytes nonce = 3; */ com.google.protobuf.ByteString getNonce(); // required bytes encryptionKey = 4; /** * required bytes encryptionKey = 4; */ boolean hasEncryptionKey(); /** * required bytes encryptionKey = 4; */ com.google.protobuf.ByteString getEncryptionKey(); // required uint64 expiryDate = 5; /** * required uint64 expiryDate = 5; */ boolean hasExpiryDate(); /** * required uint64 expiryDate = 5; */ long getExpiryDate(); // optional string encryptionAlgorithm = 6; /** * optional string encryptionAlgorithm = 6; */ boolean hasEncryptionAlgorithm(); /** * optional string encryptionAlgorithm = 6; */ java.lang.String getEncryptionAlgorithm(); /** * optional string encryptionAlgorithm = 6; */ com.google.protobuf.ByteString getEncryptionAlgorithmBytes(); } /** * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto} */ public static final class DataEncryptionKeyProto extends com.google.protobuf.GeneratedMessage implements DataEncryptionKeyProtoOrBuilder { // Use DataEncryptionKeyProto.newBuilder() to construct. 
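// Usage sketch (illustrative only; the setters follow the standard protoc
// builder pattern for the fields declared above and are not shown in full
// here; nonceBytes/keyBytes are placeholder byte arrays):
//
//   DataEncryptionKeyProto key = DataEncryptionKeyProto.newBuilder()
//       .setKeyId(1)                                             // required
//       .setBlockPoolId("BP-1")                                  // required
//       .setNonce(com.google.protobuf.ByteString.copyFrom(nonceBytes))
//       .setEncryptionKey(com.google.protobuf.ByteString.copyFrom(keyBytes))
//       .setExpiryDate(System.currentTimeMillis() + 86400000L)   // required
//       .build();  // encryptionAlgorithm is optional and may be omitted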
private DataEncryptionKeyProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DataEncryptionKeyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DataEncryptionKeyProto defaultInstance; public static DataEncryptionKeyProto getDefaultInstance() { return defaultInstance; } public DataEncryptionKeyProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DataEncryptionKeyProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; keyId_ = input.readUInt32(); break; } case 18: { bitField0_ |= 0x00000002; blockPoolId_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; nonce_ = input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; encryptionKey_ = input.readBytes(); break; } case 40: { bitField0_ |= 0x00000010; expiryDate_ = input.readUInt64(); break; } case 50: { bitField0_ |= 0x00000020; encryptionAlgorithm_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DataEncryptionKeyProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DataEncryptionKeyProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint32 keyId = 1; public static final int KEYID_FIELD_NUMBER = 1; private int keyId_; /** * required uint32 keyId = 1; */ public boolean hasKeyId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 keyId = 1; */ public int getKeyId() { return keyId_; } // required string blockPoolId = 2; public static final int 
BLOCKPOOLID_FIELD_NUMBER = 2; private java.lang.Object blockPoolId_; /** * required string blockPoolId = 2; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string blockPoolId = 2; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 2; */ public com.google.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required bytes nonce = 3; public static final int NONCE_FIELD_NUMBER = 3; private com.google.protobuf.ByteString nonce_; /** * required bytes nonce = 3; */ public boolean hasNonce() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bytes nonce = 3; */ public com.google.protobuf.ByteString getNonce() { return nonce_; } // required bytes encryptionKey = 4; public static final int ENCRYPTIONKEY_FIELD_NUMBER = 4; private com.google.protobuf.ByteString encryptionKey_; /** * required bytes encryptionKey = 4; */ public boolean hasEncryptionKey() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes encryptionKey = 4; */ public com.google.protobuf.ByteString getEncryptionKey() { return encryptionKey_; } // required uint64 expiryDate = 5; public static final int EXPIRYDATE_FIELD_NUMBER = 5; private long expiryDate_; /** * required uint64 expiryDate = 5; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 expiryDate = 5; */ public long getExpiryDate() { return expiryDate_; } // optional string encryptionAlgorithm = 6; public static final int ENCRYPTIONALGORITHM_FIELD_NUMBER = 6; private java.lang.Object encryptionAlgorithm_; /** * optional string encryptionAlgorithm = 6; */ public boolean hasEncryptionAlgorithm() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string encryptionAlgorithm = 6; */ public java.lang.String getEncryptionAlgorithm() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { encryptionAlgorithm_ = s; } return s; } } /** * optional string encryptionAlgorithm = 6; */ public com.google.protobuf.ByteString getEncryptionAlgorithmBytes() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); encryptionAlgorithm_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { keyId_ = 0; blockPoolId_ = ""; nonce_ = com.google.protobuf.ByteString.EMPTY; encryptionKey_ = com.google.protobuf.ByteString.EMPTY; expiryDate_ = 0L; encryptionAlgorithm_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasKeyId()) { memoizedIsInitialized = 
0; return false; } if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasNonce()) { memoizedIsInitialized = 0; return false; } if (!hasEncryptionKey()) { memoizedIsInitialized = 0; return false; } if (!hasExpiryDate()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, keyId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getBlockPoolIdBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, nonce_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, encryptionKey_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, expiryDate_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(6, getEncryptionAlgorithmBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, keyId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getBlockPoolIdBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, nonce_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, encryptionKey_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(5, expiryDate_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(6, getEncryptionAlgorithmBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) obj; boolean result = true; result = result && (hasKeyId() == other.hasKeyId()); if (hasKeyId()) { result = result && (getKeyId() == other.getKeyId()); } result = result && (hasBlockPoolId() == other.hasBlockPoolId()); if (hasBlockPoolId()) { result = result && getBlockPoolId() .equals(other.getBlockPoolId()); } result = result && (hasNonce() == other.hasNonce()); if (hasNonce()) { result = result && getNonce() .equals(other.getNonce()); } result = result && (hasEncryptionKey() == other.hasEncryptionKey()); if (hasEncryptionKey()) { result = result && getEncryptionKey() .equals(other.getEncryptionKey()); } result = result && (hasExpiryDate() == other.hasExpiryDate()); if (hasExpiryDate()) { result = result && (getExpiryDate() == other.getExpiryDate()); } result = result && (hasEncryptionAlgorithm() == other.hasEncryptionAlgorithm()); if (hasEncryptionAlgorithm()) { result = result && 
getEncryptionAlgorithm() .equals(other.getEncryptionAlgorithm()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasKeyId()) { hash = (37 * hash) + KEYID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasNonce()) { hash = (37 * hash) + NONCE_FIELD_NUMBER; hash = (53 * hash) + getNonce().hashCode(); } if (hasEncryptionKey()) { hash = (37 * hash) + ENCRYPTIONKEY_FIELD_NUMBER; hash = (53 * hash) + getEncryptionKey().hashCode(); } if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getExpiryDate()); } if (hasEncryptionAlgorithm()) { hash = (37 * hash) + ENCRYPTIONALGORITHM_FIELD_NUMBER; hash = (53 * hash) + getEncryptionAlgorithm().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); keyId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); blockPoolId_ = ""; bitField0_ = (bitField0_ & ~0x00000002); nonce_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); encryptionKey_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); expiryDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); encryptionAlgorithm_ = ""; bitField0_ = (bitField0_ & ~0x00000020); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { 
to_bitField0_ |= 0x00000001; } result.keyId_ = keyId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.blockPoolId_ = blockPoolId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.nonce_ = nonce_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.encryptionKey_ = encryptionKey_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.expiryDate_ = expiryDate_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.encryptionAlgorithm_ = encryptionAlgorithm_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) return this; if (other.hasKeyId()) { setKeyId(other.getKeyId()); } if (other.hasBlockPoolId()) { bitField0_ |= 0x00000002; blockPoolId_ = other.blockPoolId_; onChanged(); } if (other.hasNonce()) { setNonce(other.getNonce()); } if (other.hasEncryptionKey()) { setEncryptionKey(other.getEncryptionKey()); } if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } if (other.hasEncryptionAlgorithm()) { bitField0_ |= 0x00000020; encryptionAlgorithm_ = other.encryptionAlgorithm_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasKeyId()) { return false; } if (!hasBlockPoolId()) { return false; } if (!hasNonce()) { return false; } if (!hasEncryptionKey()) { return false; } if (!hasExpiryDate()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 keyId = 1; private int keyId_ ; /** * required uint32 keyId = 1; */ public boolean hasKeyId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 keyId = 1; */ public int getKeyId() { return keyId_; } /** * required uint32 keyId = 1; */ public Builder setKeyId(int value) { bitField0_ |= 0x00000001; keyId_ = value; onChanged(); return this; } /** * required uint32 keyId = 1; */ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000001); keyId_ = 0; onChanged(); return this; } // required string blockPoolId = 2; private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 2; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string blockPoolId = 2; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = 
blockPoolId_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); blockPoolId_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 2; */ public com.google.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string blockPoolId = 2; */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; blockPoolId_ = value; onChanged(); return this; } /** * required string blockPoolId = 2; */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000002); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /** * required string blockPoolId = 2; */ public Builder setBlockPoolIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; blockPoolId_ = value; onChanged(); return this; } // required bytes nonce = 3; private com.google.protobuf.ByteString nonce_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes nonce = 3; */ public boolean hasNonce() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bytes nonce = 3; */ public com.google.protobuf.ByteString getNonce() { return nonce_; } /** * required bytes nonce = 3; */ public Builder setNonce(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; nonce_ = value; onChanged(); return this; } /** * required bytes nonce = 3; */ public Builder clearNonce() { bitField0_ = (bitField0_ & ~0x00000004); nonce_ = getDefaultInstance().getNonce(); onChanged(); return this; } // required bytes encryptionKey = 4; private com.google.protobuf.ByteString encryptionKey_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes encryptionKey = 4; */ public boolean hasEncryptionKey() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes encryptionKey = 4; */ public com.google.protobuf.ByteString getEncryptionKey() { return encryptionKey_; } /** * required bytes encryptionKey = 4; */ public Builder setEncryptionKey(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; encryptionKey_ = value; onChanged(); return this; } /** * required bytes encryptionKey = 4; */ public Builder clearEncryptionKey() { bitField0_ = (bitField0_ & ~0x00000008); encryptionKey_ = getDefaultInstance().getEncryptionKey(); onChanged(); return this; } // required uint64 expiryDate = 5; private long expiryDate_ ; /** * required uint64 expiryDate = 5; */ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 expiryDate = 5; */ public long getExpiryDate() { return expiryDate_; } /** * required uint64 expiryDate = 5; */ public Builder setExpiryDate(long value) { bitField0_ |= 0x00000010; expiryDate_ = value; onChanged(); return this; } /** * required uint64 expiryDate = 5; */ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000010); expiryDate_ = 0L; onChanged(); return this; } // optional string encryptionAlgorithm = 6; private java.lang.Object encryptionAlgorithm_ = ""; /** * optional string 
encryptionAlgorithm = 6; */ public boolean hasEncryptionAlgorithm() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string encryptionAlgorithm = 6; */ public java.lang.String getEncryptionAlgorithm() { java.lang.Object ref = encryptionAlgorithm_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); encryptionAlgorithm_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string encryptionAlgorithm = 6; */ public com.google.protobuf.ByteString getEncryptionAlgorithmBytes() { java.lang.Object ref = encryptionAlgorithm_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); encryptionAlgorithm_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * optional string encryptionAlgorithm = 6; */ public Builder setEncryptionAlgorithm( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; encryptionAlgorithm_ = value; onChanged(); return this; } /** * optional string encryptionAlgorithm = 6; */ public Builder clearEncryptionAlgorithm() { bitField0_ = (bitField0_ & ~0x00000020); encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm(); onChanged(); return this; } /** * optional string encryptionAlgorithm = 6; */ public Builder setEncryptionAlgorithmBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; encryptionAlgorithm_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataEncryptionKeyProto) } static { defaultInstance = new DataEncryptionKeyProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DataEncryptionKeyProto) } public interface FileEncryptionInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CipherSuiteProto suite = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ boolean hasCryptoProtocolVersion(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(); // required bytes key = 3; /** * required bytes key = 3; */ boolean hasKey(); /** * required bytes key = 3; */ com.google.protobuf.ByteString getKey(); // required bytes iv = 4; /** * required bytes iv = 4; */ boolean hasIv(); /** * required bytes iv = 4; */ com.google.protobuf.ByteString getIv(); // required string keyName = 5; /** * required string keyName = 5; */ boolean hasKeyName(); /** * required string keyName = 5; */ java.lang.String getKeyName(); /** * required string keyName = 5; */ com.google.protobuf.ByteString getKeyNameBytes(); // required string ezKeyVersionName = 6; /** * required string ezKeyVersionName = 6; */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 6; */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 6; */ com.google.protobuf.ByteString getEzKeyVersionNameBytes(); } /** * Protobuf type {@code 
hadoop.hdfs.FileEncryptionInfoProto} * * <pre>
   **
   * Encryption information for a file.
   * </pre>
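   *
   * Usage sketch (illustrative only, not emitted by protoc; the field
   * values below are made-up placeholders, and ENCRYPTION_ZONES is the
   * non-UNKNOWN value of CryptoProtocolVersionProto in hdfs.proto).
   * Builds a message with the generated builder, then round-trips it
   * through the wire format:
   * <pre>
   * FileEncryptionInfoProto info = FileEncryptionInfoProto.newBuilder()
   *     .setSuite(CipherSuiteProto.AES_CTR_NOPADDING)
   *     .setCryptoProtocolVersion(
   *         CryptoProtocolVersionProto.ENCRYPTION_ZONES)
   *     .setKey(com.google.protobuf.ByteString.copyFrom(new byte[16]))
   *     .setIv(com.google.protobuf.ByteString.copyFrom(new byte[16]))
   *     .setKeyName("myKey")             // placeholder
   *     .setEzKeyVersionName("myKey@0")  // placeholder
   *     .build();  // build() throws if any required field is unset
   * FileEncryptionInfoProto copy =
   *     FileEncryptionInfoProto.parseFrom(info.toByteString());
   * </pre>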
*/ public static final class FileEncryptionInfoProto extends com.google.protobuf.GeneratedMessage implements FileEncryptionInfoProtoOrBuilder { // Use FileEncryptionInfoProto.newBuilder() to construct. private FileEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FileEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FileEncryptionInfoProto defaultInstance; public static FileEncryptionInfoProto getDefaultInstance() { return defaultInstance; } public FileEncryptionInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FileEncryptionInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; suite_ = value; } break; } case 16: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value; } break; } case 26: { bitField0_ |= 0x00000004; key_ = input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; iv_ = input.readBytes(); break; } case 42: { bitField0_ |= 0x00000010; keyName_ = input.readBytes(); break; } case 50: { bitField0_ |= 0x00000020; ezKeyVersionName_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public 
FileEncryptionInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new FileEncryptionInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CipherSuiteProto suite = 1; public static final int SUITE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { return suite_; } // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { return cryptoProtocolVersion_; } // required bytes key = 3; public static final int KEY_FIELD_NUMBER = 3; private com.google.protobuf.ByteString key_; /** * required bytes key = 3; */ public boolean hasKey() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bytes key = 3; */ public com.google.protobuf.ByteString getKey() { return key_; } // required bytes iv = 4; public static final int IV_FIELD_NUMBER = 4; private com.google.protobuf.ByteString iv_; /** * required bytes iv = 4; */ public boolean hasIv() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes iv = 4; */ public com.google.protobuf.ByteString getIv() { return iv_; } // required string keyName = 5; public static final int KEYNAME_FIELD_NUMBER = 5; private java.lang.Object keyName_; /** * required string keyName = 5; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string keyName = 5; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } } /** * required string keyName = 5; */ public com.google.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string ezKeyVersionName = 6; public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 6; private java.lang.Object ezKeyVersionName_; /** * required string ezKeyVersionName = 6; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string ezKeyVersionName = 6; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if 
(ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 6; */ public com.google.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; key_ = com.google.protobuf.ByteString.EMPTY; iv_ = com.google.protobuf.ByteString.EMPTY; keyName_ = ""; ezKeyVersionName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } if (!hasCryptoProtocolVersion()) { memoizedIsInitialized = 0; return false; } if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasIv()) { memoizedIsInitialized = 0; return false; } if (!hasKeyName()) { memoizedIsInitialized = 0; return false; } if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, suite_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeEnum(2, cryptoProtocolVersion_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, key_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, iv_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBytes(5, getKeyNameBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(6, getEzKeyVersionNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, suite_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(2, cryptoProtocolVersion_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, key_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, iv_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(5, getKeyNameBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(6, getEzKeyVersionNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { 
return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) obj; boolean result = true; result = result && (hasSuite() == other.hasSuite()); if (hasSuite()) { result = result && (getSuite() == other.getSuite()); } result = result && (hasCryptoProtocolVersion() == other.hasCryptoProtocolVersion()); if (hasCryptoProtocolVersion()) { result = result && (getCryptoProtocolVersion() == other.getCryptoProtocolVersion()); } result = result && (hasKey() == other.hasKey()); if (hasKey()) { result = result && getKey() .equals(other.getKey()); } result = result && (hasIv() == other.hasIv()); if (hasIv()) { result = result && getIv() .equals(other.getIv()); } result = result && (hasKeyName() == other.hasKeyName()); if (hasKeyName()) { result = result && getKeyName() .equals(other.getKeyName()); } result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName()); if (hasEzKeyVersionName()) { result = result && getEzKeyVersionName() .equals(other.getEzKeyVersionName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getSuite()); } if (hasCryptoProtocolVersion()) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getCryptoProtocolVersion()); } if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasIv()) { hash = (37 * hash) + IV_FIELD_NUMBER; hash = (53 * hash) + getIv().hashCode(); } if (hasKeyName()) { hash = (37 * hash) + KEYNAME_FIELD_NUMBER; hash = (53 * hash) + getKeyName().hashCode(); } if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto 
parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto} * *
<pre>
     **
     * Encryption information for a file.
     * </pre>
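     *
     * Usage sketch (illustrative only, not emitted by protoc): builders
     * are mutable and reusable, and {@code clear()} resets both each
     * field's value and its presence bit, as the generated code below
     * shows:
     * <pre>
     * FileEncryptionInfoProto.Builder b = FileEncryptionInfoProto.newBuilder();
     * b.setKeyName("k1");        // placeholder value
     * assert b.hasKeyName();     // presence bit now set
     * b.clear();
     * assert !b.hasKeyName();    // value and presence bit reset
     * </pre>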
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; bitField0_ = (bitField0_ & ~0x00000001); cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; bitField0_ = (bitField0_ & ~0x00000002); key_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); iv_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); keyName_ = ""; bitField0_ = (bitField0_ & ~0x00000010); ezKeyVersionName_ = ""; bitField0_ = (bitField0_ & ~0x00000020); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.suite_ = suite_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.cryptoProtocolVersion_ = cryptoProtocolVersion_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.key_ = key_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.iv_ = iv_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 
0x00000010; } result.keyName_ = keyName_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.ezKeyVersionName_ = ezKeyVersionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasCryptoProtocolVersion()) { setCryptoProtocolVersion(other.getCryptoProtocolVersion()); } if (other.hasKey()) { setKey(other.getKey()); } if (other.hasIv()) { setIv(other.getIv()); } if (other.hasKeyName()) { bitField0_ |= 0x00000010; keyName_ = other.keyName_; onChanged(); } if (other.hasEzKeyVersionName()) { bitField0_ |= 0x00000020; ezKeyVersionName_ = other.ezKeyVersionName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSuite()) { return false; } if (!hasCryptoProtocolVersion()) { return false; } if (!hasKey()) { return false; } if (!hasIv()) { return false; } if (!hasKeyName()) { return false; } if (!hasEzKeyVersionName()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CipherSuiteProto suite = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { return suite_; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; onChanged(); return this; } // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; /** * 
required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { return cryptoProtocolVersion_; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder clearCryptoProtocolVersion() { bitField0_ = (bitField0_ & ~0x00000002); cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; onChanged(); return this; } // required bytes key = 3; private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes key = 3; */ public boolean hasKey() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bytes key = 3; */ public com.google.protobuf.ByteString getKey() { return key_; } /** * required bytes key = 3; */ public Builder setKey(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; } /** * required bytes key = 3; */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; } // required bytes iv = 4; private com.google.protobuf.ByteString iv_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes iv = 4; */ public boolean hasIv() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes iv = 4; */ public com.google.protobuf.ByteString getIv() { return iv_; } /** * required bytes iv = 4; */ public Builder setIv(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; iv_ = value; onChanged(); return this; } /** * required bytes iv = 4; */ public Builder clearIv() { bitField0_ = (bitField0_ & ~0x00000008); iv_ = getDefaultInstance().getIv(); onChanged(); return this; } // required string keyName = 5; private java.lang.Object keyName_ = ""; /** * required string keyName = 5; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string keyName = 5; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); keyName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string keyName = 5; */ public com.google.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string keyName = 5; */ public Builder setKeyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; keyName_ = value; onChanged(); return this; } /** * required 
string keyName = 5; */ public Builder clearKeyName() { bitField0_ = (bitField0_ & ~0x00000010); keyName_ = getDefaultInstance().getKeyName(); onChanged(); return this; } /** * required string keyName = 5; */ public Builder setKeyNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; keyName_ = value; onChanged(); return this; } // required string ezKeyVersionName = 6; private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 6; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string ezKeyVersionName = 6; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); ezKeyVersionName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 6; */ public com.google.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string ezKeyVersionName = 6; */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; ezKeyVersionName_ = value; onChanged(); return this; } /** * required string ezKeyVersionName = 6; */ public Builder clearEzKeyVersionName() { bitField0_ = (bitField0_ & ~0x00000020); ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); onChanged(); return this; } /** * required string ezKeyVersionName = 6; */ public Builder setEzKeyVersionNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; ezKeyVersionName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FileEncryptionInfoProto) } static { defaultInstance = new FileEncryptionInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FileEncryptionInfoProto) } public interface PerFileEncryptionInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required bytes key = 1; /** * required bytes key = 1; */ boolean hasKey(); /** * required bytes key = 1; */ com.google.protobuf.ByteString getKey(); // required bytes iv = 2; /** * required bytes iv = 2; */ boolean hasIv(); /** * required bytes iv = 2; */ com.google.protobuf.ByteString getIv(); // required string ezKeyVersionName = 3; /** * required string ezKeyVersionName = 3; */ boolean hasEzKeyVersionName(); /** * required string ezKeyVersionName = 3; */ java.lang.String getEzKeyVersionName(); /** * required string ezKeyVersionName = 3; */ com.google.protobuf.ByteString getEzKeyVersionNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto} * *
<pre>
   **
   * Encryption information for an individual
   * file within an encryption zone
   * </pre>
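   *
   * Usage sketch (illustrative only, not emitted by protoc): the
   * generated {@code parseDelimitedFrom} below pairs with protobuf's
   * standard {@code writeDelimitedTo}, so several messages can share
   * a single stream ({@code msg} stands for any built
   * PerFileEncryptionInfoProto):
   * <pre>
   * java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
   * msg.writeDelimitedTo(out);  // length-prefixed frame
   * PerFileEncryptionInfoProto back = PerFileEncryptionInfoProto
   *     .parseDelimitedFrom(new java.io.ByteArrayInputStream(out.toByteArray()));
   * </pre>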
*/ public static final class PerFileEncryptionInfoProto extends com.google.protobuf.GeneratedMessage implements PerFileEncryptionInfoProtoOrBuilder { // Use PerFileEncryptionInfoProto.newBuilder() to construct. private PerFileEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private PerFileEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final PerFileEncryptionInfoProto defaultInstance; public static PerFileEncryptionInfoProto getDefaultInstance() { return defaultInstance; } public PerFileEncryptionInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private PerFileEncryptionInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; key_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; iv_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; ezKeyVersionName_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public PerFileEncryptionInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new PerFileEncryptionInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bytes key = 1; public static final int KEY_FIELD_NUMBER = 1; private com.google.protobuf.ByteString key_; /** * required bytes key = 1; */ public boolean hasKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bytes key = 1; */ public com.google.protobuf.ByteString getKey() { return key_; } // required bytes 
iv = 2; public static final int IV_FIELD_NUMBER = 2; private com.google.protobuf.ByteString iv_; /** * required bytes iv = 2; */ public boolean hasIv() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bytes iv = 2; */ public com.google.protobuf.ByteString getIv() { return iv_; } // required string ezKeyVersionName = 3; public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 3; private java.lang.Object ezKeyVersionName_; /** * required string ezKeyVersionName = 3; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string ezKeyVersionName = 3; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ezKeyVersionName_ = s; } return s; } } /** * required string ezKeyVersionName = 3; */ public com.google.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { key_ = com.google.protobuf.ByteString.EMPTY; iv_ = com.google.protobuf.ByteString.EMPTY; ezKeyVersionName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasKey()) { memoizedIsInitialized = 0; return false; } if (!hasIv()) { memoizedIsInitialized = 0; return false; } if (!hasEzKeyVersionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, key_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, iv_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getEzKeyVersionNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, key_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, iv_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getEzKeyVersionNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) obj; 
boolean result = true; result = result && (hasKey() == other.hasKey()); if (hasKey()) { result = result && getKey() .equals(other.getKey()); } result = result && (hasIv() == other.hasIv()); if (hasIv()) { result = result && getIv() .equals(other.getIv()); } result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName()); if (hasEzKeyVersionName()) { result = result && getEzKeyVersionName() .equals(other.getEzKeyVersionName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasIv()) { hash = (37 * hash) + IV_FIELD_NUMBER; hash = (53 * hash) + getIv().hashCode(); } if (hasEzKeyVersionName()) { hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER; hash = (53 * hash) + getEzKeyVersionName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto} * *
     * <pre>
     **
     * Encryption information for an individual
     * file within an encryption zone
     * </pre>
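     *
     * Editor's note, not part of the protoc output: a minimal builder sketch,
     * assuming HdfsProtos and com.google.protobuf.ByteString are imported.
     * All three fields are required, so build() throws an
     * UninitializedMessageException if any is unset; dekBytes and ivBytes are
     * hypothetical byte[] placeholders for the file's data-encryption key and IV.
     * <pre>
     * HdfsProtos.PerFileEncryptionInfoProto info =
     *     HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
     *         .setKey(ByteString.copyFrom(dekBytes))    // required bytes key = 1
     *         .setIv(ByteString.copyFrom(ivBytes))      // required bytes iv = 2
     *         .setEzKeyVersionName("ezKey@0")           // required string ezKeyVersionName = 3
     *         .build();
     * byte[] wire = info.toByteArray();
     * HdfsProtos.PerFileEncryptionInfoProto parsed =
     *     HdfsProtos.PerFileEncryptionInfoProto.parseFrom(wire);
     * </pre>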
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); key_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); iv_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); ezKeyVersionName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.key_ = key_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.iv_ = iv_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.ezKeyVersionName_ = ezKeyVersionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasKey()) { setKey(other.getKey()); } if (other.hasIv()) { setIv(other.getIv()); } if (other.hasEzKeyVersionName()) { bitField0_ |= 0x00000004; ezKeyVersionName_ = other.ezKeyVersionName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasKey()) { return false; } if (!hasIv()) { return false; } if (!hasEzKeyVersionName()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bytes key = 1; private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes key = 1; */ public boolean hasKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bytes key = 1; */ public com.google.protobuf.ByteString getKey() { return key_; } /** * required bytes key = 1; */ public Builder setKey(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; key_ = value; onChanged(); return this; } /** * required bytes key = 1; */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); key_ = getDefaultInstance().getKey(); onChanged(); return this; } // required bytes iv = 2; private com.google.protobuf.ByteString iv_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes iv = 2; */ public boolean hasIv() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bytes iv = 2; */ public com.google.protobuf.ByteString getIv() { return iv_; } /** * required bytes iv = 2; */ public Builder setIv(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; iv_ = value; onChanged(); return this; } /** * required bytes iv = 2; */ public Builder clearIv() { bitField0_ = (bitField0_ & ~0x00000002); iv_ = getDefaultInstance().getIv(); onChanged(); return this; } // required string ezKeyVersionName = 3; private java.lang.Object ezKeyVersionName_ = ""; /** * required string ezKeyVersionName = 3; */ public boolean hasEzKeyVersionName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string ezKeyVersionName = 3; */ public java.lang.String getEzKeyVersionName() { java.lang.Object ref = ezKeyVersionName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); ezKeyVersionName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string ezKeyVersionName = 3; */ public com.google.protobuf.ByteString getEzKeyVersionNameBytes() { java.lang.Object ref = ezKeyVersionName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ezKeyVersionName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string 
ezKeyVersionName = 3; */ public Builder setEzKeyVersionName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; ezKeyVersionName_ = value; onChanged(); return this; } /** * required string ezKeyVersionName = 3; */ public Builder clearEzKeyVersionName() { bitField0_ = (bitField0_ & ~0x00000004); ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName(); onChanged(); return this; } /** * required string ezKeyVersionName = 3; */ public Builder setEzKeyVersionNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; ezKeyVersionName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PerFileEncryptionInfoProto) } static { defaultInstance = new PerFileEncryptionInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.PerFileEncryptionInfoProto) } public interface ZoneEncryptionInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CipherSuiteProto suite = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ boolean hasCryptoProtocolVersion(); /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(); // required string keyName = 3; /** * required string keyName = 3; */ boolean hasKeyName(); /** * required string keyName = 3; */ java.lang.String getKeyName(); /** * required string keyName = 3; */ com.google.protobuf.ByteString getKeyNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto} * *
   * <pre>
   **
   * Encryption information for an encryption
   * zone
   * </pre>
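   *
   * Editor's note, an editorial addition: in HDFS the NameNode keeps one of
   * these per encryption-zone root. A hedged sketch; ENCRYPTION_ZONES is
   * assumed to be the non-UNKNOWN CryptoProtocolVersionProto value:
   * <pre>
   * HdfsProtos.ZoneEncryptionInfoProto zone =
   *     HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
   *         .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
   *         .setCryptoProtocolVersion(
   *             HdfsProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES)
   *         .setKeyName("ezKey")              // required string keyName = 3
   *         .build();
   * </pre>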
*/ public static final class ZoneEncryptionInfoProto extends com.google.protobuf.GeneratedMessage implements ZoneEncryptionInfoProtoOrBuilder { // Use ZoneEncryptionInfoProto.newBuilder() to construct. private ZoneEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ZoneEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ZoneEncryptionInfoProto defaultInstance; public static ZoneEncryptionInfoProto getDefaultInstance() { return defaultInstance; } public ZoneEncryptionInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ZoneEncryptionInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; suite_ = value; } break; } case 16: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value; } break; } case 26: { bitField0_ |= 0x00000004; keyName_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public ZoneEncryptionInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new 
ZoneEncryptionInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CipherSuiteProto suite = 1; public static final int SUITE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { return suite_; } // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { return cryptoProtocolVersion_; } // required string keyName = 3; public static final int KEYNAME_FIELD_NUMBER = 3; private java.lang.Object keyName_; /** * required string keyName = 3; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string keyName = 3; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { keyName_ = s; } return s; } } /** * required string keyName = 3; */ public com.google.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; keyName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } if (!hasCryptoProtocolVersion()) { memoizedIsInitialized = 0; return false; } if (!hasKeyName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, suite_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeEnum(2, cryptoProtocolVersion_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getKeyNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if 
(((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, suite_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(2, cryptoProtocolVersion_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getKeyNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) obj; boolean result = true; result = result && (hasSuite() == other.hasSuite()); if (hasSuite()) { result = result && (getSuite() == other.getSuite()); } result = result && (hasCryptoProtocolVersion() == other.hasCryptoProtocolVersion()); if (hasCryptoProtocolVersion()) { result = result && (getCryptoProtocolVersion() == other.getCryptoProtocolVersion()); } result = result && (hasKeyName() == other.hasKeyName()); if (hasKeyName()) { result = result && getKeyName() .equals(other.getKeyName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getSuite()); } if (hasCryptoProtocolVersion()) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getCryptoProtocolVersion()); } if (hasKeyName()) { hash = (37 * hash) + KEYNAME_FIELD_NUMBER; hash = (53 * hash) + getKeyName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } 
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto} * *
     * <pre>
     **
     * Encryption information for an encryption
     * zone
     * </pre>
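     *
     * Editor's note, an editorial addition: generated messages are immutable,
     * so modifications go through toBuilder(); only the fields set on the
     * builder are replaced when rebuilding. Reusing the hypothetical zone
     * message from the sketch above:
     * <pre>
     * HdfsProtos.ZoneEncryptionInfoProto rekeyed = zone.toBuilder()
     *     .setKeyName("ezKey-v2")   // replace only the key name; suite and
     *     .build();                 // cryptoProtocolVersion carry over
     * </pre>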
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; bitField0_ = (bitField0_ & ~0x00000001); cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; bitField0_ = (bitField0_ & ~0x00000002); keyName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.suite_ = suite_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.cryptoProtocolVersion_ = cryptoProtocolVersion_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.keyName_ = keyName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasCryptoProtocolVersion()) { setCryptoProtocolVersion(other.getCryptoProtocolVersion()); } if (other.hasKeyName()) { bitField0_ |= 0x00000004; keyName_ = other.keyName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSuite()) { return false; } if (!hasCryptoProtocolVersion()) { return false; } if (!hasKeyName()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CipherSuiteProto suite = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { return suite_; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; onChanged(); return this; } // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public boolean hasCryptoProtocolVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() { return cryptoProtocolVersion_; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cryptoProtocolVersion_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2; */ public Builder 
clearCryptoProtocolVersion() { bitField0_ = (bitField0_ & ~0x00000002); cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION; onChanged(); return this; } // required string keyName = 3; private java.lang.Object keyName_ = ""; /** * required string keyName = 3; */ public boolean hasKeyName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string keyName = 3; */ public java.lang.String getKeyName() { java.lang.Object ref = keyName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); keyName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string keyName = 3; */ public com.google.protobuf.ByteString getKeyNameBytes() { java.lang.Object ref = keyName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); keyName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string keyName = 3; */ public Builder setKeyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; keyName_ = value; onChanged(); return this; } /** * required string keyName = 3; */ public Builder clearKeyName() { bitField0_ = (bitField0_ & ~0x00000004); keyName_ = getDefaultInstance().getKeyName(); onChanged(); return this; } /** * required string keyName = 3; */ public Builder setKeyNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; keyName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ZoneEncryptionInfoProto) } static { defaultInstance = new ZoneEncryptionInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ZoneEncryptionInfoProto) } public interface CipherOptionProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CipherSuiteProto suite = 1; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ boolean hasSuite(); /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite(); // optional bytes inKey = 2; /** * optional bytes inKey = 2; */ boolean hasInKey(); /** * optional bytes inKey = 2; */ com.google.protobuf.ByteString getInKey(); // optional bytes inIv = 3; /** * optional bytes inIv = 3; */ boolean hasInIv(); /** * optional bytes inIv = 3; */ com.google.protobuf.ByteString getInIv(); // optional bytes outKey = 4; /** * optional bytes outKey = 4; */ boolean hasOutKey(); /** * optional bytes outKey = 4; */ com.google.protobuf.ByteString getOutKey(); // optional bytes outIv = 5; /** * optional bytes outIv = 5; */ boolean hasOutIv(); /** * optional bytes outIv = 5; */ com.google.protobuf.ByteString getOutIv(); } /** * Protobuf type {@code hadoop.hdfs.CipherOptionProto} * *
   * <pre>
   **
   * Cipher option
   * </pre>
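   *
   * Editor's note, an editorial addition: only suite is required here; the
   * in/out keys and IVs are optional and typically remain unset until a key
   * exchange fills them in. A minimal sketch:
   * <pre>
   * HdfsProtos.CipherOptionProto opt = HdfsProtos.CipherOptionProto.newBuilder()
   *     .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
   *     .build();                 // valid: inKey/inIv/outKey/outIv are optional
   * boolean negotiated = opt.hasInKey() && opt.hasOutKey();   // false here
   * </pre>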
*/ public static final class CipherOptionProto extends com.google.protobuf.GeneratedMessage implements CipherOptionProtoOrBuilder { // Use CipherOptionProto.newBuilder() to construct. private CipherOptionProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CipherOptionProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CipherOptionProto defaultInstance; public static CipherOptionProto getDefaultInstance() { return defaultInstance; } public CipherOptionProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CipherOptionProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; suite_ = value; } break; } case 18: { bitField0_ |= 0x00000002; inKey_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; inIv_ = input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; outKey_ = input.readBytes(); break; } case 42: { bitField0_ |= 0x00000010; outIv_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public CipherOptionProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new CipherOptionProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CipherSuiteProto suite = 1; public static final int 
SUITE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { return suite_; } // optional bytes inKey = 2; public static final int INKEY_FIELD_NUMBER = 2; private com.google.protobuf.ByteString inKey_; /** * optional bytes inKey = 2; */ public boolean hasInKey() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes inKey = 2; */ public com.google.protobuf.ByteString getInKey() { return inKey_; } // optional bytes inIv = 3; public static final int INIV_FIELD_NUMBER = 3; private com.google.protobuf.ByteString inIv_; /** * optional bytes inIv = 3; */ public boolean hasInIv() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes inIv = 3; */ public com.google.protobuf.ByteString getInIv() { return inIv_; } // optional bytes outKey = 4; public static final int OUTKEY_FIELD_NUMBER = 4; private com.google.protobuf.ByteString outKey_; /** * optional bytes outKey = 4; */ public boolean hasOutKey() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional bytes outKey = 4; */ public com.google.protobuf.ByteString getOutKey() { return outKey_; } // optional bytes outIv = 5; public static final int OUTIV_FIELD_NUMBER = 5; private com.google.protobuf.ByteString outIv_; /** * optional bytes outIv = 5; */ public boolean hasOutIv() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional bytes outIv = 5; */ public com.google.protobuf.ByteString getOutIv() { return outIv_; } private void initFields() { suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; inKey_ = com.google.protobuf.ByteString.EMPTY; inIv_ = com.google.protobuf.ByteString.EMPTY; outKey_ = com.google.protobuf.ByteString.EMPTY; outIv_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSuite()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, suite_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, inKey_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, inIv_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, outKey_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBytes(5, outIv_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, suite_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, inKey_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, inIv_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += 
com.google.protobuf.CodedOutputStream .computeBytesSize(4, outKey_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(5, outIv_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) obj; boolean result = true; result = result && (hasSuite() == other.hasSuite()); if (hasSuite()) { result = result && (getSuite() == other.getSuite()); } result = result && (hasInKey() == other.hasInKey()); if (hasInKey()) { result = result && getInKey() .equals(other.getInKey()); } result = result && (hasInIv() == other.hasInIv()); if (hasInIv()) { result = result && getInIv() .equals(other.getInIv()); } result = result && (hasOutKey() == other.hasOutKey()); if (hasOutKey()) { result = result && getOutKey() .equals(other.getOutKey()); } result = result && (hasOutIv() == other.hasOutIv()); if (hasOutIv()) { result = result && getOutIv() .equals(other.getOutIv()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSuite()) { hash = (37 * hash) + SUITE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getSuite()); } if (hasInKey()) { hash = (37 * hash) + INKEY_FIELD_NUMBER; hash = (53 * hash) + getInKey().hashCode(); } if (hasInIv()) { hash = (37 * hash) + INIV_FIELD_NUMBER; hash = (53 * hash) + getInIv().hashCode(); } if (hasOutKey()) { hash = (37 * hash) + OUTKEY_FIELD_NUMBER; hash = (53 * hash) + getOutKey().hashCode(); } if (hasOutIv()) { hash = (37 * hash) + OUTIV_FIELD_NUMBER; hash = (53 * hash) + getOutIv().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(java.io.InputStream input) throws 
java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CipherOptionProto} * *
     * <pre>
     **
     * Cipher option
     * </pre>
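     *
     * Editor's note, an editorial addition: every optional field pairs its
     * setter with a clear method that resets the presence bit. Reusing the
     * hypothetical opt message from the sketch above:
     * <pre>
     * HdfsProtos.CipherOptionProto.Builder b = opt.toBuilder();
     * b.setInKey(ByteString.copyFrom(sessionKey));   // sessionKey is a hypothetical byte[]
     * b.clearInKey();                                // hasInKey() is false again
     * </pre>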
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; bitField0_ = (bitField0_ & ~0x00000001); inKey_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); inIv_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); outKey_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); outIv_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.suite_ = suite_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.inKey_ = inKey_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.inIv_ = inIv_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.outKey_ = outKey_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.outIv_ = outIv_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance()) return this; if (other.hasSuite()) { setSuite(other.getSuite()); } if (other.hasInKey()) { setInKey(other.getInKey()); } if (other.hasInIv()) { setInIv(other.getInIv()); } if (other.hasOutKey()) { setOutKey(other.getOutKey()); } if (other.hasOutIv()) { setOutIv(other.getOutIv()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSuite()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CipherSuiteProto suite = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public boolean hasSuite() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() { return suite_; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; suite_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.CipherSuiteProto suite = 1; */ public Builder clearSuite() { bitField0_ = (bitField0_ & ~0x00000001); suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN; onChanged(); return this; } // optional bytes inKey = 2; private com.google.protobuf.ByteString inKey_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes inKey = 2; */ public boolean hasInKey() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes inKey = 2; */ public com.google.protobuf.ByteString getInKey() { return inKey_; } /** * optional bytes inKey = 2; */ public Builder setInKey(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; inKey_ = value; onChanged(); return this; } /** * optional bytes inKey = 2; */ public Builder clearInKey() { bitField0_ = (bitField0_ & ~0x00000002); inKey_ = getDefaultInstance().getInKey(); onChanged(); return this; } // optional bytes inIv = 3; private com.google.protobuf.ByteString inIv_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes inIv = 3; */ public boolean hasInIv() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes inIv = 3; */ public com.google.protobuf.ByteString getInIv() { 
return inIv_; } /** * optional bytes inIv = 3; */ public Builder setInIv(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; inIv_ = value; onChanged(); return this; } /** * optional bytes inIv = 3; */ public Builder clearInIv() { bitField0_ = (bitField0_ & ~0x00000004); inIv_ = getDefaultInstance().getInIv(); onChanged(); return this; } // optional bytes outKey = 4; private com.google.protobuf.ByteString outKey_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes outKey = 4; */ public boolean hasOutKey() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional bytes outKey = 4; */ public com.google.protobuf.ByteString getOutKey() { return outKey_; } /** * optional bytes outKey = 4; */ public Builder setOutKey(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; outKey_ = value; onChanged(); return this; } /** * optional bytes outKey = 4; */ public Builder clearOutKey() { bitField0_ = (bitField0_ & ~0x00000008); outKey_ = getDefaultInstance().getOutKey(); onChanged(); return this; } // optional bytes outIv = 5; private com.google.protobuf.ByteString outIv_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes outIv = 5; */ public boolean hasOutIv() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional bytes outIv = 5; */ public com.google.protobuf.ByteString getOutIv() { return outIv_; } /** * optional bytes outIv = 5; */ public Builder setOutIv(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; outIv_ = value; onChanged(); return this; } /** * optional bytes outIv = 5; */ public Builder clearOutIv() { bitField0_ = (bitField0_ & ~0x00000010); outIv_ = getDefaultInstance().getOutIv(); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CipherOptionProto) } static { defaultInstance = new CipherOptionProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CipherOptionProto) } public interface LocatedBlocksProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 fileLength = 1; /** * required uint64 fileLength = 1; */ boolean hasFileLength(); /** * required uint64 fileLength = 1; */ long getFileLength(); // repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ java.util.List getBlocksList(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ int getBlocksCount(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ java.util.List getBlocksOrBuilderList(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index); // required bool underConstruction = 3; /** * required bool underConstruction = 3; */ boolean hasUnderConstruction(); /** * required bool underConstruction = 3; */ boolean getUnderConstruction(); // optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ boolean hasLastBlock(); /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock(); /** * optional 
.hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder(); // required bool isLastBlockComplete = 5; /** * required bool isLastBlockComplete = 5; */ boolean hasIsLastBlockComplete(); /** * required bool isLastBlockComplete = 5; */ boolean getIsLastBlockComplete(); // optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ boolean hasFileEncryptionInfo(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto} * *
   **
   * A set of file blocks and their locations.
   * 
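   *
   * A minimal construction sketch (values are illustrative, and this assumes
   * the protobuf 2.x builder API generated in this file). The three required
   * fields must be set, or build() throws an UninitializedMessageException:
   *
   *   LocatedBlocksProto blocks = LocatedBlocksProto.newBuilder()
   *       .setFileLength(134217728L)       // total file length in bytes
   *       .setUnderConstruction(false)     // file is closed
   *       .setIsLastBlockComplete(true)    // last block is finalized
   *       .build();
   *   LocatedBlocksProto parsed =
   *       LocatedBlocksProto.parseFrom(blocks.toByteArray());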
*/ public static final class LocatedBlocksProto extends com.google.protobuf.GeneratedMessage implements LocatedBlocksProtoOrBuilder { // Use LocatedBlocksProto.newBuilder() to construct. private LocatedBlocksProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private LocatedBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final LocatedBlocksProto defaultInstance; public static LocatedBlocksProto getDefaultInstance() { return defaultInstance; } public LocatedBlocksProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private LocatedBlocksProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; fileLength_ = input.readUInt64(); break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { blocks_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry)); break; } case 24: { bitField0_ |= 0x00000002; underConstruction_ = input.readBool(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = lastBlock_.toBuilder(); } lastBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(lastBlock_); lastBlock_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 40: { bitField0_ |= 0x00000008; isLastBlockComplete_ = input.readBool(); break; } case 50: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) == 0x00000010)) { subBuilder = fileEncryptionInfo_.toBuilder(); } fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fileEncryptionInfo_); fileEncryptionInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public LocatedBlocksProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new LocatedBlocksProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 fileLength = 1; public static final int FILELENGTH_FIELD_NUMBER = 1; private long fileLength_; /** * required uint64 fileLength = 1; */ public boolean hasFileLength() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 fileLength = 1; */ public long getFileLength() { return fileLength_; } // repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; public static final int BLOCKS_FIELD_NUMBER = 2; private java.util.List blocks_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { return blocks_.get(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } // required bool underConstruction = 3; public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3; private boolean underConstruction_; /** * required bool underConstruction = 3; */ public boolean hasUnderConstruction() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool underConstruction = 3; */ public boolean getUnderConstruction() { return underConstruction_; } // optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; public static final int LASTBLOCK_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public boolean hasLastBlock() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { return lastBlock_; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { return lastBlock_; } // required bool isLastBlockComplete = 5; public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5; private boolean isLastBlockComplete_; /** * required bool isLastBlockComplete = 5; */ public boolean 
hasIsLastBlockComplete() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bool isLastBlockComplete = 5; */ public boolean getIsLastBlockComplete() { return isLastBlockComplete_; } // optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 6; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { return fileEncryptionInfo_; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { return fileEncryptionInfo_; } private void initFields() { fileLength_ = 0L; blocks_ = java.util.Collections.emptyList(); underConstruction_ = false; lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); isLastBlockComplete_ = false; fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasFileLength()) { memoizedIsInitialized = 0; return false; } if (!hasUnderConstruction()) { memoizedIsInitialized = 0; return false; } if (!hasIsLastBlockComplete()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasLastBlock()) { if (!getLastBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, fileLength_); } for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(2, blocks_.get(i)); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(3, underConstruction_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(4, lastBlock_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBool(5, isLastBlockComplete_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeMessage(6, fileEncryptionInfo_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, fileLength_); } for (int i = 0; i < blocks_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, blocks_.get(i)); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(3, underConstruction_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += 
com.google.protobuf.CodedOutputStream .computeMessageSize(4, lastBlock_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(5, isLastBlockComplete_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(6, fileEncryptionInfo_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj; boolean result = true; result = result && (hasFileLength() == other.hasFileLength()); if (hasFileLength()) { result = result && (getFileLength() == other.getFileLength()); } result = result && getBlocksList() .equals(other.getBlocksList()); result = result && (hasUnderConstruction() == other.hasUnderConstruction()); if (hasUnderConstruction()) { result = result && (getUnderConstruction() == other.getUnderConstruction()); } result = result && (hasLastBlock() == other.hasLastBlock()); if (hasLastBlock()) { result = result && getLastBlock() .equals(other.getLastBlock()); } result = result && (hasIsLastBlockComplete() == other.hasIsLastBlockComplete()); if (hasIsLastBlockComplete()) { result = result && (getIsLastBlockComplete() == other.getIsLastBlockComplete()); } result = result && (hasFileEncryptionInfo() == other.hasFileEncryptionInfo()); if (hasFileEncryptionInfo()) { result = result && getFileEncryptionInfo() .equals(other.getFileEncryptionInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFileLength()) { hash = (37 * hash) + FILELENGTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileLength()); } if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } if (hasUnderConstruction()) { hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getUnderConstruction()); } if (hasLastBlock()) { hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER; hash = (53 * hash) + getLastBlock().hashCode(); } if (hasIsLastBlockComplete()) { hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getIsLastBlockComplete()); } if (hasFileEncryptionInfo()) { hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER; hash = (53 * hash) + getFileEncryptionInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto} * *
     **
     * A set of file blocks and their locations.
     * 
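     *
     * Merge-semantics sketch (names are illustrative; behavior follows the
     * generated mergeFrom below): scalar fields that are set in the other
     * message overwrite this builder's values, while the repeated
     * {@code blocks} field is appended to rather than replaced:
     *
     *   LocatedBlocksProto merged = LocatedBlocksProto.newBuilder(base)
     *       .mergeFrom(update)  // update's blocks are appended to base's
     *       .build();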
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlocksFieldBuilder(); getLastBlockFieldBuilder(); getFileEncryptionInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); fileLength_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { blocksBuilder_.clear(); } underConstruction_ = false; bitField0_ = (bitField0_ & ~0x00000004); if (lastBlockBuilder_ == null) { lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } else { lastBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); isLastBlockComplete_ = false; bitField0_ = (bitField0_ & ~0x00000010); if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.fileLength_ = fileLength_; if (blocksBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & 
~0x00000002); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } result.underConstruction_ = underConstruction_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000004; } if (lastBlockBuilder_ == null) { result.lastBlock_ = lastBlock_; } else { result.lastBlock_ = lastBlockBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000008; } result.isLastBlockComplete_ = isLastBlockComplete_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000010; } if (fileEncryptionInfoBuilder_ == null) { result.fileEncryptionInfo_ = fileEncryptionInfo_; } else { result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this; if (other.hasFileLength()) { setFileLength(other.getFileLength()); } if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000002); blocksBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } if (other.hasUnderConstruction()) { setUnderConstruction(other.getUnderConstruction()); } if (other.hasLastBlock()) { mergeLastBlock(other.getLastBlock()); } if (other.hasIsLastBlockComplete()) { setIsLastBlockComplete(other.getIsLastBlockComplete()); } if (other.hasFileEncryptionInfo()) { mergeFileEncryptionInfo(other.getFileEncryptionInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasFileLength()) { return false; } if (!hasUnderConstruction()) { return false; } if (!hasIsLastBlockComplete()) { return false; } for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } if (hasLastBlock()) { if (!getLastBlock().isInitialized()) { return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 fileLength = 1; private long fileLength_ ; /** * required uint64 fileLength = 1; */ public boolean hasFileLength() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 fileLength = 1; */ public long getFileLength() { return fileLength_; } /** * required uint64 fileLength = 1; */ public Builder setFileLength(long value) { bitField0_ |= 0x00000001; fileLength_ = value; onChanged(); return this; } /** * required uint64 fileLength = 1; */ public Builder clearFileLength() { bitField0_ = (bitField0_ & ~0x00000001); fileLength_ = 0L; onChanged(); return this; } // repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; private java.util.List blocks_ = java.util.Collections.emptyList(); private void ensureBlocksIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { blocks_ = new java.util.ArrayList(blocks_); bitField0_ |= 0x00000002; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksList() { if (blocksBuilder_ == null) { return java.util.Collections.unmodifiableList(blocks_); } else { return blocksBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public int getBlocksCount() { if (blocksBuilder_ == null) { return blocks_.size(); } else { return blocksBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder 
setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.set(index, value); onChanged(); } else { blocksBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.set(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(value); onChanged(); } else { blocksBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(index, value); onChanged(); } else { blocksBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder addAllBlocks( java.lang.Iterable values) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); super.addAll(values, blocks_); onChanged(); } else { blocksBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder clearBlocks() { if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { blocksBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public Builder removeBlocks(int index) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.remove(index); onChanged(); } else { blocksBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( int index) { return getBlocksFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessageOrBuilder(index); } } /** * 
repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksOrBuilderList() { if (blocksBuilder_ != null) { return blocksBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blocks_); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { return getBlocksFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( int index) { return getBlocksFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 2; */ public java.util.List getBlocksBuilderList() { return getBlocksFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlocksFieldBuilder() { if (blocksBuilder_ == null) { blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( blocks_, ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); blocks_ = null; } return blocksBuilder_; } // required bool underConstruction = 3; private boolean underConstruction_ ; /** * required bool underConstruction = 3; */ public boolean hasUnderConstruction() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool underConstruction = 3; */ public boolean getUnderConstruction() { return underConstruction_; } /** * required bool underConstruction = 3; */ public Builder setUnderConstruction(boolean value) { bitField0_ |= 0x00000004; underConstruction_ = value; onChanged(); return this; } /** * required bool underConstruction = 3; */ public Builder clearUnderConstruction() { bitField0_ = (bitField0_ & ~0x00000004); underConstruction_ = false; onChanged(); return this; } // optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_; /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public boolean hasLastBlock() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { if (lastBlockBuilder_ == null) { return lastBlock_; } else { return lastBlockBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto 
value) { if (lastBlockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } lastBlock_ = value; onChanged(); } else { lastBlockBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder setLastBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (lastBlockBuilder_ == null) { lastBlock_ = builderForValue.build(); onChanged(); } else { lastBlockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (lastBlockBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(lastBlock_).mergeFrom(value).buildPartial(); } else { lastBlock_ = value; } onChanged(); } else { lastBlockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public Builder clearLastBlock() { if (lastBlockBuilder_ == null) { lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); onChanged(); } else { lastBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() { bitField0_ |= 0x00000008; onChanged(); return getLastBlockFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { if (lastBlockBuilder_ != null) { return lastBlockBuilder_.getMessageOrBuilder(); } else { return lastBlock_; } } /** * optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getLastBlockFieldBuilder() { if (lastBlockBuilder_ == null) { lastBlockBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( lastBlock_, getParentForChildren(), isClean()); lastBlock_ = null; } return lastBlockBuilder_; } // required bool isLastBlockComplete = 5; private boolean isLastBlockComplete_ ; /** * required bool isLastBlockComplete = 5; */ public boolean hasIsLastBlockComplete() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required bool isLastBlockComplete = 5; */ public boolean getIsLastBlockComplete() { return isLastBlockComplete_; } /** * required bool isLastBlockComplete = 5; */ public Builder setIsLastBlockComplete(boolean value) { bitField0_ |= 0x00000010; isLastBlockComplete_ = value; onChanged(); return this; } /** * required bool isLastBlockComplete = 5; */ public Builder clearIsLastBlockComplete() { bitField0_ = (bitField0_ & ~0x00000010); 
isLastBlockComplete_ = false; onChanged(); return this; } // optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { return fileEncryptionInfo_; } else { return fileEncryptionInfoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileEncryptionInfo_ = value; onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder setFileEncryptionInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = builderForValue.build(); onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (((bitField0_ & 0x00000020) == 0x00000020) && fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial(); } else { fileEncryptionInfo_ = value; } onChanged(); } else { fileEncryptionInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000020; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public Builder clearFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); onChanged(); } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() { bitField0_ |= 0x00000020; onChanged(); return getFileEncryptionInfoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { if (fileEncryptionInfoBuilder_ != null) { return fileEncryptionInfoBuilder_.getMessageOrBuilder(); } else { return fileEncryptionInfo_; } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> getFileEncryptionInfoFieldBuilder() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>( fileEncryptionInfo_, getParentForChildren(), isClean()); fileEncryptionInfo_ = null; } return fileEncryptionInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlocksProto) } static { defaultInstance = new LocatedBlocksProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlocksProto) } public interface HdfsFileStatusProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ boolean hasFileType(); /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType(); // required bytes path = 2; /** * required bytes path = 2; * *
     * local name of the inode, encoded in Java UTF-8
     * 
*/ boolean hasPath(); /** * required bytes path = 2; * *
     * local name of the inode, encoded in Java UTF-8
     * 
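     *
     * Decoding sketch (variable names are hypothetical): since the bytes
     * hold a UTF-8 encoded name, callers would typically convert with:
     *
     *   String name = status.getPath().toStringUtf8();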
*/ com.google.protobuf.ByteString getPath(); // required uint64 length = 3; /** * required uint64 length = 3; */ boolean hasLength(); /** * required uint64 length = 3; */ long getLength(); // required .hadoop.hdfs.FsPermissionProto permission = 4; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ boolean hasPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); // required string owner = 5; /** * required string owner = 5; */ boolean hasOwner(); /** * required string owner = 5; */ java.lang.String getOwner(); /** * required string owner = 5; */ com.google.protobuf.ByteString getOwnerBytes(); // required string group = 6; /** * required string group = 6; */ boolean hasGroup(); /** * required string group = 6; */ java.lang.String getGroup(); /** * required string group = 6; */ com.google.protobuf.ByteString getGroupBytes(); // required uint64 modification_time = 7; /** * required uint64 modification_time = 7; */ boolean hasModificationTime(); /** * required uint64 modification_time = 7; */ long getModificationTime(); // required uint64 access_time = 8; /** * required uint64 access_time = 8; */ boolean hasAccessTime(); /** * required uint64 access_time = 8; */ long getAccessTime(); // optional bytes symlink = 9; /** * optional bytes symlink = 9; * *
     * Optional field for symlinks
     * 
*/ boolean hasSymlink(); /** * optional bytes symlink = 9; * *
     * Optional field for symlinks
     * 
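     *
     * Usage sketch (hypothetical): since this field is only meaningful for
     * symlinks, check the required fileType before reading it:
     *
     *   if (status.getFileType() == HdfsFileStatusProto.FileType.IS_SYMLINK) {
     *     String target = status.getSymlink().toStringUtf8();
     *   }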
*/ com.google.protobuf.ByteString getSymlink(); // optional uint32 block_replication = 10 [default = 0]; /** * optional uint32 block_replication = 10 [default = 0]; * *
     * Optional fields for files
     * 
*/ boolean hasBlockReplication(); /** * optional uint32 block_replication = 10 [default = 0]; * *
     * Optional fields for files
     * 
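     *
     * Presence sketch (hypothetical): with [default = 0], the getter returns
     * 0 whenever the field is unset, so hasBlockReplication() is what
     * distinguishes "absent" from an explicit 0:
     *
     *   if (status.hasBlockReplication()) {
     *     int replication = status.getBlockReplication();  // explicitly set
     *   }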
*/ int getBlockReplication(); // optional uint64 blocksize = 11 [default = 0]; /** * optional uint64 blocksize = 11 [default = 0]; */ boolean hasBlocksize(); /** * optional uint64 blocksize = 11 [default = 0]; */ long getBlocksize(); // optional .hadoop.hdfs.LocatedBlocksProto locations = 12; /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
     * supplied only if requested by the client
     * 
*/ boolean hasLocations(); /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
     * supplied only if requested by the client
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations(); /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
     * supplied only if requested by the client
     * 
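     *
     * Usage sketch (hypothetical): because locations are only supplied on
     * request, guard the accessor before reading:
     *
     *   if (status.hasLocations()) {
     *     long fileLength = status.getLocations().getFileLength();
     *   }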
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder(); // optional uint64 fileId = 13 [default = 0]; /** * optional uint64 fileId = 13 [default = 0]; * *
     * Optional field for fileId
     * 
*/ boolean hasFileId(); /** * optional uint64 fileId = 13 [default = 0]; * *
     * Optional field for fileId
     * 
*/ long getFileId(); // optional int32 childrenNum = 14 [default = -1]; /** * optional int32 childrenNum = 14 [default = -1]; */ boolean hasChildrenNum(); /** * optional int32 childrenNum = 14 [default = -1]; */ int getChildrenNum(); // optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
     * Optional field for file encryption
     * 
*/ boolean hasFileEncryptionInfo(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
     * Optional field for file encryption
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo(); /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
     * Optional field for file encryption
     * 
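     *
     * Usage sketch (hypothetical): when unset, the getter returns the empty
     * default instance rather than null, so check presence first:
     *
     *   if (status.hasFileEncryptionInfo()) {
     *     FileEncryptionInfoProto info = status.getFileEncryptionInfo();
     *   }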
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder(); // optional uint32 storagePolicy = 16 [default = 0]; /** * optional uint32 storagePolicy = 16 [default = 0]; * *
     * block storage policy ID
     * 
*/ boolean hasStoragePolicy(); /** * optional uint32 storagePolicy = 16 [default = 0]; * *
     * block storage policy ID
     * 
*/ int getStoragePolicy(); } /** * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto} * *
   **
   * Status of a file, directory, or symlink.
   * Optionally includes a file's block locations if requested by the client on the RPC call.
   * 
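   *
   * A reading sketch (variable names are hypothetical), dispatching on the
   * required fileType and decoding the UTF-8 path bytes:
   *
   *   HdfsFileStatusProto status = HdfsFileStatusProto.parseFrom(bytes);
   *   String name = status.getPath().toStringUtf8();
   *   switch (status.getFileType()) {
   *     case IS_DIR:     break;  // a directory; childrenNum may be set
   *     case IS_FILE:    break;  // a file; length, blocksize, etc. apply
   *     case IS_SYMLINK: break;  // a symlink; getSymlink() holds the target
   *   }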
*/ public static final class HdfsFileStatusProto extends com.google.protobuf.GeneratedMessage implements HdfsFileStatusProtoOrBuilder { // Use HdfsFileStatusProto.newBuilder() to construct. private HdfsFileStatusProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private HdfsFileStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final HdfsFileStatusProto defaultInstance; public static HdfsFileStatusProto getDefaultInstance() { return defaultInstance; } public HdfsFileStatusProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private HdfsFileStatusProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; fileType_ = value; } break; } case 18: { bitField0_ |= 0x00000002; path_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; length_ = input.readUInt64(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = permission_.toBuilder(); } permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(permission_); permission_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { bitField0_ |= 0x00000010; owner_ = input.readBytes(); break; } case 50: { bitField0_ |= 0x00000020; group_ = input.readBytes(); break; } case 56: { bitField0_ |= 0x00000040; modificationTime_ = input.readUInt64(); break; } case 64: { bitField0_ |= 0x00000080; accessTime_ = input.readUInt64(); break; } case 74: { bitField0_ |= 0x00000100; symlink_ = input.readBytes(); break; } case 80: { bitField0_ |= 0x00000200; blockReplication_ = input.readUInt32(); break; } case 88: { bitField0_ |= 0x00000400; blocksize_ = input.readUInt64(); break; } case 98: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = null; if (((bitField0_ & 0x00000800) == 0x00000800)) { subBuilder = locations_.toBuilder(); } locations_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(locations_); locations_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000800; break; } case 104: { bitField0_ |= 0x00001000; fileId_ = input.readUInt64(); break; } case 112: { bitField0_ |= 0x00002000; childrenNum_ = 
input.readInt32(); break; } case 122: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00004000) == 0x00004000)) { subBuilder = fileEncryptionInfo_.toBuilder(); } fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fileEncryptionInfo_); fileEncryptionInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00004000; break; } case 128: { bitField0_ |= 0x00008000; storagePolicy_ = input.readUInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public HdfsFileStatusProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new HdfsFileStatusProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } /** * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.FileType} */ public enum FileType implements com.google.protobuf.ProtocolMessageEnum { /** * IS_DIR = 1; */ IS_DIR(0, 1), /** * IS_FILE = 2; */ IS_FILE(1, 2), /** * IS_SYMLINK = 3; */ IS_SYMLINK(2, 3), ; /** * IS_DIR = 1; */ public static final int IS_DIR_VALUE = 1; /** * IS_FILE = 2; */ public static final int IS_FILE_VALUE = 2; /** * IS_SYMLINK = 3; */ public static final int IS_SYMLINK_VALUE = 3; public final int getNumber() { return value; } public static FileType valueOf(int value) { switch (value) { case 1: return IS_DIR; case 2: return IS_FILE; case 3: return IS_SYMLINK; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public FileType findValueByNumber(int number) { return FileType.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0); } private static final FileType[] VALUES = values(); public static FileType valueOf( 
com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private FileType(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.FileType) } private int bitField0_; // required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; public static final int FILETYPE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public boolean hasFileType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { return fileType_; } // required bytes path = 2; public static final int PATH_FIELD_NUMBER = 2; private com.google.protobuf.ByteString path_; /** * required bytes path = 2; * *
     * local name of the inode, encoded in Java UTF-8
     * 
*/ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bytes path = 2; * *
     * local name of the inode, encoded in Java UTF-8
     * 
*/ public com.google.protobuf.ByteString getPath() { return path_; } // required uint64 length = 3; public static final int LENGTH_FIELD_NUMBER = 3; private long length_; /** * required uint64 length = 3; */ public boolean hasLength() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 length = 3; */ public long getLength() { return length_; } // required .hadoop.hdfs.FsPermissionProto permission = 4; public static final int PERMISSION_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { return permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_; } // required string owner = 5; public static final int OWNER_FIELD_NUMBER = 5; private java.lang.Object owner_; /** * required string owner = 5; */ public boolean hasOwner() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string owner = 5; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * required string owner = 5; */ public com.google.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string group = 6; public static final int GROUP_FIELD_NUMBER = 6; private java.lang.Object group_; /** * required string group = 6; */ public boolean hasGroup() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string group = 6; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } } /** * required string group = 6; */ public com.google.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint64 modification_time = 7; public static final int MODIFICATION_TIME_FIELD_NUMBER = 7; private long modificationTime_; /** * required uint64 modification_time = 7; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * required uint64 modification_time = 7; */ public long getModificationTime() { return modificationTime_; } // required uint64 access_time = 8; public static final int ACCESS_TIME_FIELD_NUMBER = 8; private long accessTime_; /** * required uint64 access_time = 8; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000080) == 
0x00000080); } /** * required uint64 access_time = 8; */ public long getAccessTime() { return accessTime_; } // optional bytes symlink = 9; public static final int SYMLINK_FIELD_NUMBER = 9; private com.google.protobuf.ByteString symlink_; /** * optional bytes symlink = 9; * *
     * Optional fields for symlink
     * 
*/ public boolean hasSymlink() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional bytes symlink = 9; * *
     * Optional fields for symlink
     * 
*/ public com.google.protobuf.ByteString getSymlink() { return symlink_; } // optional uint32 block_replication = 10 [default = 0]; public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10; private int blockReplication_; /** * optional uint32 block_replication = 10 [default = 0]; * *
     * Optional fields for file
     * 
*/ public boolean hasBlockReplication() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional uint32 block_replication = 10 [default = 0]; * *
     * Optional fields for file
     * 
*/ public int getBlockReplication() { return blockReplication_; } // optional uint64 blocksize = 11 [default = 0]; public static final int BLOCKSIZE_FIELD_NUMBER = 11; private long blocksize_; /** * optional uint64 blocksize = 11 [default = 0]; */ public boolean hasBlocksize() { return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional uint64 blocksize = 11 [default = 0]; */ public long getBlocksize() { return blocksize_; } // optional .hadoop.hdfs.LocatedBlocksProto locations = 12; public static final int LOCATIONS_FIELD_NUMBER = 12; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
      * supplied only if asked by client
     * 
*/ public boolean hasLocations() { return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
      * supplied only if asked by client
     * 
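      *
      * Since this field is optional and only filled in when the client asked for
      * block locations, guard reads with the presence check; a minimal sketch
      * ("status" is a hypothetical HdfsFileStatusProto instance):
      *
      *   if (status.hasLocations()) {
      *     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto blocks =
      *         status.getLocations();
      *   }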
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { return locations_; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
      * supplied only if asked by client
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { return locations_; } // optional uint64 fileId = 13 [default = 0]; public static final int FILEID_FIELD_NUMBER = 13; private long fileId_; /** * optional uint64 fileId = 13 [default = 0]; * *
     * Optional field for fileId
     * 
*/ public boolean hasFileId() { return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional uint64 fileId = 13 [default = 0]; * *
     * Optional field for fileId
     * 
*/ public long getFileId() { return fileId_; } // optional int32 childrenNum = 14 [default = -1]; public static final int CHILDRENNUM_FIELD_NUMBER = 14; private int childrenNum_; /** * optional int32 childrenNum = 14 [default = -1]; */ public boolean hasChildrenNum() { return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional int32 childrenNum = 14 [default = -1]; */ public int getChildrenNum() { return childrenNum_; } // optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 15; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
     * Optional field for file encryption
     * 
*/ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00004000) == 0x00004000); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
     * Optional field for file encryption
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { return fileEncryptionInfo_; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
     * Optional field for file encryption
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { return fileEncryptionInfo_; } // optional uint32 storagePolicy = 16 [default = 0]; public static final int STORAGEPOLICY_FIELD_NUMBER = 16; private int storagePolicy_; /** * optional uint32 storagePolicy = 16 [default = 0]; * *
     * block storage policy id
     * 
*/ public boolean hasStoragePolicy() { return ((bitField0_ & 0x00008000) == 0x00008000); } /** * optional uint32 storagePolicy = 16 [default = 0]; * *
     * block storage policy id
     * 
*/ public int getStoragePolicy() { return storagePolicy_; } private void initFields() { fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; path_ = com.google.protobuf.ByteString.EMPTY; length_ = 0L; permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); owner_ = ""; group_ = ""; modificationTime_ = 0L; accessTime_ = 0L; symlink_ = com.google.protobuf.ByteString.EMPTY; blockReplication_ = 0; blocksize_ = 0L; locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); fileId_ = 0L; childrenNum_ = -1; fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); storagePolicy_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasFileType()) { memoizedIsInitialized = 0; return false; } if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasLength()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!hasOwner()) { memoizedIsInitialized = 0; return false; } if (!hasGroup()) { memoizedIsInitialized = 0; return false; } if (!hasModificationTime()) { memoizedIsInitialized = 0; return false; } if (!hasAccessTime()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasLocations()) { if (!getLocations().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, fileType_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, path_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, length_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, permission_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBytes(5, getOwnerBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(6, getGroupBytes()); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt64(7, modificationTime_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeUInt64(8, accessTime_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeBytes(9, symlink_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeUInt32(10, blockReplication_); } if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeUInt64(11, blocksize_); } if (((bitField0_ & 0x00000800) == 0x00000800)) { output.writeMessage(12, locations_); } if (((bitField0_ & 0x00001000) == 0x00001000)) { output.writeUInt64(13, fileId_); } if (((bitField0_ & 0x00002000) == 0x00002000)) { output.writeInt32(14, childrenNum_); } if (((bitField0_ & 0x00004000) == 0x00004000)) { output.writeMessage(15, fileEncryptionInfo_); } if (((bitField0_ & 0x00008000) == 0x00008000)) { output.writeUInt32(16, storagePolicy_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if 
(((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, fileType_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, path_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, length_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, permission_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(5, getOwnerBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(6, getGroupBytes()); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(7, modificationTime_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(8, accessTime_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(9, symlink_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(10, blockReplication_); } if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(11, blocksize_); } if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(12, locations_); } if (((bitField0_ & 0x00001000) == 0x00001000)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(13, fileId_); } if (((bitField0_ & 0x00002000) == 0x00002000)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(14, childrenNum_); } if (((bitField0_ & 0x00004000) == 0x00004000)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(15, fileEncryptionInfo_); } if (((bitField0_ & 0x00008000) == 0x00008000)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(16, storagePolicy_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj; boolean result = true; result = result && (hasFileType() == other.hasFileType()); if (hasFileType()) { result = result && (getFileType() == other.getFileType()); } result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && (hasLength() == other.hasLength()); if (hasLength()) { result = result && (getLength() == other.getLength()); } result = result && (hasPermission() == other.hasPermission()); if (hasPermission()) { result = result && getPermission() .equals(other.getPermission()); } result = result && (hasOwner() == other.hasOwner()); if (hasOwner()) { result = result && getOwner() .equals(other.getOwner()); } result = result && (hasGroup() == 
other.hasGroup()); if (hasGroup()) { result = result && getGroup() .equals(other.getGroup()); } result = result && (hasModificationTime() == other.hasModificationTime()); if (hasModificationTime()) { result = result && (getModificationTime() == other.getModificationTime()); } result = result && (hasAccessTime() == other.hasAccessTime()); if (hasAccessTime()) { result = result && (getAccessTime() == other.getAccessTime()); } result = result && (hasSymlink() == other.hasSymlink()); if (hasSymlink()) { result = result && getSymlink() .equals(other.getSymlink()); } result = result && (hasBlockReplication() == other.hasBlockReplication()); if (hasBlockReplication()) { result = result && (getBlockReplication() == other.getBlockReplication()); } result = result && (hasBlocksize() == other.hasBlocksize()); if (hasBlocksize()) { result = result && (getBlocksize() == other.getBlocksize()); } result = result && (hasLocations() == other.hasLocations()); if (hasLocations()) { result = result && getLocations() .equals(other.getLocations()); } result = result && (hasFileId() == other.hasFileId()); if (hasFileId()) { result = result && (getFileId() == other.getFileId()); } result = result && (hasChildrenNum() == other.hasChildrenNum()); if (hasChildrenNum()) { result = result && (getChildrenNum() == other.getChildrenNum()); } result = result && (hasFileEncryptionInfo() == other.hasFileEncryptionInfo()); if (hasFileEncryptionInfo()) { result = result && getFileEncryptionInfo() .equals(other.getFileEncryptionInfo()); } result = result && (hasStoragePolicy() == other.hasStoragePolicy()); if (hasStoragePolicy()) { result = result && (getStoragePolicy() == other.getStoragePolicy()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFileType()) { hash = (37 * hash) + FILETYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getFileType()); } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLength()); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } if (hasGroup()) { hash = (37 * hash) + GROUP_FIELD_NUMBER; hash = (53 * hash) + getGroup().hashCode(); } if (hasModificationTime()) { hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getModificationTime()); } if (hasAccessTime()) { hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getAccessTime()); } if (hasSymlink()) { hash = (37 * hash) + SYMLINK_FIELD_NUMBER; hash = (53 * hash) + getSymlink().hashCode(); } if (hasBlockReplication()) { hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getBlockReplication(); } if (hasBlocksize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlocksize()); } if (hasLocations()) { hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; hash = (53 * hash) + getLocations().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileId()); } if (hasChildrenNum()) { hash = (37 * hash) + CHILDRENNUM_FIELD_NUMBER; hash = 
(53 * hash) + getChildrenNum(); } if (hasFileEncryptionInfo()) { hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER; hash = (53 * hash) + getFileEncryptionInfo().hashCode(); } if (hasStoragePolicy()) { hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER; hash = (53 * hash) + getStoragePolicy(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto} * *
     **
     * Status of a file, directory or symlink
      * Optionally includes a file's block locations if requested by the client on the RPC call.
     * 
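      *
      * A minimal usage sketch (not part of the generated source; it assumes
      * FsPermissionProto exposes setPerm(int) for its required uint32 perm field,
      * per hdfs.proto). All eight required fields must be set, or build() throws
      * an UninitializedMessageException:
      *
      *   long now = java.lang.System.currentTimeMillis();
      *   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto status =
      *       org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder()
      *           .setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR)
      *           .setPath(com.google.protobuf.ByteString.copyFromUtf8("data"))
      *           .setLength(0L)
      *           .setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto
      *               .newBuilder().setPerm(0755).build())
      *           .setOwner("hdfs")
      *           .setGroup("supergroup")
      *           .setModificationTime(now)
      *           .setAccessTime(now)
      *           .build();
      *
      *   // Round-trip through the wire format; parseFrom declares
      *   // InvalidProtocolBufferException, a checked exception.
      *   byte[] bytes = status.toByteArray();
      *   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parsed =
      *       org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.parseFrom(bytes);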
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getPermissionFieldBuilder(); getLocationsFieldBuilder(); getFileEncryptionInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; bitField0_ = (bitField0_ & ~0x00000001); path_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); if (permissionBuilder_ == null) { permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); owner_ = ""; bitField0_ = (bitField0_ & ~0x00000010); group_ = ""; bitField0_ = (bitField0_ & ~0x00000020); modificationTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); accessTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); symlink_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000100); blockReplication_ = 0; bitField0_ = (bitField0_ & ~0x00000200); blocksize_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); if (locationsBuilder_ == null) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); } else { locationsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000800); fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00001000); childrenNum_ = -1; bitField0_ = (bitField0_ & ~0x00002000); if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00004000); storagePolicy_ = 0; bitField0_ = (bitField0_ & ~0x00008000); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() { 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.fileType_ = fileType_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.path_ = path_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.length_ = length_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (permissionBuilder_ == null) { result.permission_ = permission_; } else { result.permission_ = permissionBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.owner_ = owner_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.group_ = group_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.modificationTime_ = modificationTime_; if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.accessTime_ = accessTime_; if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000100; } result.symlink_ = symlink_; if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000200; } result.blockReplication_ = blockReplication_; if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000400; } result.blocksize_ = blocksize_; if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000800; } if (locationsBuilder_ == null) { result.locations_ = locations_; } else { result.locations_ = locationsBuilder_.build(); } if (((from_bitField0_ & 0x00001000) == 0x00001000)) { to_bitField0_ |= 0x00001000; } result.fileId_ = fileId_; if (((from_bitField0_ & 0x00002000) == 0x00002000)) { to_bitField0_ |= 0x00002000; } result.childrenNum_ = childrenNum_; if (((from_bitField0_ & 0x00004000) == 0x00004000)) { to_bitField0_ |= 0x00004000; } if (fileEncryptionInfoBuilder_ == null) { result.fileEncryptionInfo_ = fileEncryptionInfo_; } else { result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build(); } if (((from_bitField0_ & 0x00008000) == 0x00008000)) { to_bitField0_ |= 0x00008000; } result.storagePolicy_ = storagePolicy_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this; if (other.hasFileType()) { setFileType(other.getFileType()); } if (other.hasPath()) { setPath(other.getPath()); } if (other.hasLength()) { setLength(other.getLength()); } if (other.hasPermission()) { mergePermission(other.getPermission()); } if (other.hasOwner()) { bitField0_ 
|= 0x00000010; owner_ = other.owner_; onChanged(); } if (other.hasGroup()) { bitField0_ |= 0x00000020; group_ = other.group_; onChanged(); } if (other.hasModificationTime()) { setModificationTime(other.getModificationTime()); } if (other.hasAccessTime()) { setAccessTime(other.getAccessTime()); } if (other.hasSymlink()) { setSymlink(other.getSymlink()); } if (other.hasBlockReplication()) { setBlockReplication(other.getBlockReplication()); } if (other.hasBlocksize()) { setBlocksize(other.getBlocksize()); } if (other.hasLocations()) { mergeLocations(other.getLocations()); } if (other.hasFileId()) { setFileId(other.getFileId()); } if (other.hasChildrenNum()) { setChildrenNum(other.getChildrenNum()); } if (other.hasFileEncryptionInfo()) { mergeFileEncryptionInfo(other.getFileEncryptionInfo()); } if (other.hasStoragePolicy()) { setStoragePolicy(other.getStoragePolicy()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasFileType()) { return false; } if (!hasPath()) { return false; } if (!hasLength()) { return false; } if (!hasPermission()) { return false; } if (!hasOwner()) { return false; } if (!hasGroup()) { return false; } if (!hasModificationTime()) { return false; } if (!hasAccessTime()) { return false; } if (!getPermission().isInitialized()) { return false; } if (hasLocations()) { if (!getLocations().isInitialized()) { return false; } } if (hasFileEncryptionInfo()) { if (!getFileEncryptionInfo().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public boolean hasFileType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { return fileType_; } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; fileType_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1; */ public Builder clearFileType() { bitField0_ = (bitField0_ & ~0x00000001); fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; onChanged(); return this; } // required bytes path = 2; private com.google.protobuf.ByteString path_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes path = 2; * *
        * local name of inode, encoded in Java UTF-8
       * 
*/ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bytes path = 2; * *
        * local name of inode, encoded in Java UTF-8
       * 
*/ public com.google.protobuf.ByteString getPath() { return path_; } /** * required bytes path = 2; * *
        * local name of inode, encoded in Java UTF-8
       * 
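        *
        * The bytes carry only the last path component; for example (hypothetical
        * builder variable):
        *
        *   builder.setPath(com.google.protobuf.ByteString.copyFromUtf8("file.txt"));
        *   java.lang.String name = builder.getPath().toStringUtf8();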
*/ public Builder setPath(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; path_ = value; onChanged(); return this; } /** * required bytes path = 2; * *
        * local name of inode, encoded in Java UTF-8
       * 
*/ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000002); path_ = getDefaultInstance().getPath(); onChanged(); return this; } // required uint64 length = 3; private long length_ ; /** * required uint64 length = 3; */ public boolean hasLength() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 length = 3; */ public long getLength() { return length_; } /** * required uint64 length = 3; */ public Builder setLength(long value) { bitField0_ |= 0x00000004; length_ = value; onChanged(); return this; } /** * required uint64 length = 3; */ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000004); length_ = 0L; onChanged(); return this; } // required .hadoop.hdfs.FsPermissionProto permission = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public boolean hasPermission() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } permission_ = value; onChanged(); } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); onChanged(); } else { permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && permission_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); } else { permission_ = value; } onChanged(); } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public Builder clearPermission() { if (permissionBuilder_ == null) { permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getPermissionBuilder() { 
bitField0_ |= 0x00000008; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 4; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( permission_, getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } // required string owner = 5; private java.lang.Object owner_ = ""; /** * required string owner = 5; */ public boolean hasOwner() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string owner = 5; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); owner_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string owner = 5; */ public com.google.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string owner = 5; */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; owner_ = value; onChanged(); return this; } /** * required string owner = 5; */ public Builder clearOwner() { bitField0_ = (bitField0_ & ~0x00000010); owner_ = getDefaultInstance().getOwner(); onChanged(); return this; } /** * required string owner = 5; */ public Builder setOwnerBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; owner_ = value; onChanged(); return this; } // required string group = 6; private java.lang.Object group_ = ""; /** * required string group = 6; */ public boolean hasGroup() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string group = 6; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); group_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string group = 6; */ public com.google.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string group = 6; */ public Builder setGroup( java.lang.String value) { if (value == null) { throw new NullPointerException(); } 
bitField0_ |= 0x00000020; group_ = value; onChanged(); return this; } /** * required string group = 6; */ public Builder clearGroup() { bitField0_ = (bitField0_ & ~0x00000020); group_ = getDefaultInstance().getGroup(); onChanged(); return this; } /** * required string group = 6; */ public Builder setGroupBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; group_ = value; onChanged(); return this; } // required uint64 modification_time = 7; private long modificationTime_ ; /** * required uint64 modification_time = 7; */ public boolean hasModificationTime() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * required uint64 modification_time = 7; */ public long getModificationTime() { return modificationTime_; } /** * required uint64 modification_time = 7; */ public Builder setModificationTime(long value) { bitField0_ |= 0x00000040; modificationTime_ = value; onChanged(); return this; } /** * required uint64 modification_time = 7; */ public Builder clearModificationTime() { bitField0_ = (bitField0_ & ~0x00000040); modificationTime_ = 0L; onChanged(); return this; } // required uint64 access_time = 8; private long accessTime_ ; /** * required uint64 access_time = 8; */ public boolean hasAccessTime() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * required uint64 access_time = 8; */ public long getAccessTime() { return accessTime_; } /** * required uint64 access_time = 8; */ public Builder setAccessTime(long value) { bitField0_ |= 0x00000080; accessTime_ = value; onChanged(); return this; } /** * required uint64 access_time = 8; */ public Builder clearAccessTime() { bitField0_ = (bitField0_ & ~0x00000080); accessTime_ = 0L; onChanged(); return this; } // optional bytes symlink = 9; private com.google.protobuf.ByteString symlink_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes symlink = 9; * *
       * Optional fields for symlink
       * 
*/ public boolean hasSymlink() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional bytes symlink = 9; * *
       * Optional fields for symlink
       * 
*/ public com.google.protobuf.ByteString getSymlink() { return symlink_; } /** * optional bytes symlink = 9; * *
       * Optional fields for symlink
       * 
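        *
        * Only meaningful when the status describes a symlink; for example
        * (hypothetical builder variable, target encoded as UTF-8 bytes):
        *
        *   builder.setSymlink(com.google.protobuf.ByteString.copyFromUtf8("/target/path"));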
*/ public Builder setSymlink(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000100; symlink_ = value; onChanged(); return this; } /** * optional bytes symlink = 9; * *
       * Optional fields for symlink
       * 
*/ public Builder clearSymlink() { bitField0_ = (bitField0_ & ~0x00000100); symlink_ = getDefaultInstance().getSymlink(); onChanged(); return this; } // optional uint32 block_replication = 10 [default = 0]; private int blockReplication_ ; /** * optional uint32 block_replication = 10 [default = 0]; * *
       * Optional fields for file
       * 
*/ public boolean hasBlockReplication() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional uint32 block_replication = 10 [default = 0]; * *
       * Optional fields for file
       * 
*/ public int getBlockReplication() { return blockReplication_; } /** * optional uint32 block_replication = 10 [default = 0]; * *
       * Optional fields for file
       * 
*/ public Builder setBlockReplication(int value) { bitField0_ |= 0x00000200; blockReplication_ = value; onChanged(); return this; } /** * optional uint32 block_replication = 10 [default = 0]; * *
       * Optional fields for file
       * 
*/ public Builder clearBlockReplication() { bitField0_ = (bitField0_ & ~0x00000200); blockReplication_ = 0; onChanged(); return this; } // optional uint64 blocksize = 11 [default = 0]; private long blocksize_ ; /** * optional uint64 blocksize = 11 [default = 0]; */ public boolean hasBlocksize() { return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional uint64 blocksize = 11 [default = 0]; */ public long getBlocksize() { return blocksize_; } /** * optional uint64 blocksize = 11 [default = 0]; */ public Builder setBlocksize(long value) { bitField0_ |= 0x00000400; blocksize_ = value; onChanged(); return this; } /** * optional uint64 blocksize = 11 [default = 0]; */ public Builder clearBlocksize() { bitField0_ = (bitField0_ & ~0x00000400); blocksize_ = 0L; onChanged(); return this; } // optional .hadoop.hdfs.LocatedBlocksProto locations = 12; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_; /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public boolean hasLocations() { return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { if (locationsBuilder_ == null) { return locations_; } else { return locationsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } locations_ = value; onChanged(); } else { locationsBuilder_.setMessage(value); } bitField0_ |= 0x00000800; return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public Builder setLocations( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) { if (locationsBuilder_ == null) { locations_ = builderForValue.build(); onChanged(); } else { locationsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000800; return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (((bitField0_ & 0x00000800) == 0x00000800) && locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial(); } else { locations_ = value; } onChanged(); } else { locationsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000800; return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public Builder clearLocations() { if (locationsBuilder_ == null) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); onChanged(); } else { locationsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000800); return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() { bitField0_ |= 0x00000800; onChanged(); return getLocationsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { if (locationsBuilder_ != null) { return locationsBuilder_.getMessageOrBuilder(); } else { return locations_; } } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 12; * *
        * supplied only if asked by client
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> getLocationsFieldBuilder() { if (locationsBuilder_ == null) { locationsBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>( locations_, getParentForChildren(), isClean()); locations_ = null; } return locationsBuilder_; } // optional uint64 fileId = 13 [default = 0]; private long fileId_ ; /** * optional uint64 fileId = 13 [default = 0]; * *
       * Optional field for fileId
       * 
*/ public boolean hasFileId() { return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional uint64 fileId = 13 [default = 0]; * *
       * Optional field for fileId
       * 
*/ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 13 [default = 0]; * *
       * Optional field for fileId
       * 
*/ public Builder setFileId(long value) { bitField0_ |= 0x00001000; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 13 [default = 0]; * *
       * Optional field for fileId
       * 
*/ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00001000); fileId_ = 0L; onChanged(); return this; } // optional int32 childrenNum = 14 [default = -1]; private int childrenNum_ = -1; /** * optional int32 childrenNum = 14 [default = -1]; */ public boolean hasChildrenNum() { return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional int32 childrenNum = 14 [default = -1]; */ public int getChildrenNum() { return childrenNum_; } /** * optional int32 childrenNum = 14 [default = -1]; */ public Builder setChildrenNum(int value) { bitField0_ |= 0x00002000; childrenNum_ = value; onChanged(); return this; } /** * optional int32 childrenNum = 14 [default = -1]; */ public Builder clearChildrenNum() { bitField0_ = (bitField0_ & ~0x00002000); childrenNum_ = -1; onChanged(); return this; } // optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_; /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public boolean hasFileEncryptionInfo() { return ((bitField0_ & 0x00004000) == 0x00004000); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { return fileEncryptionInfo_; } else { return fileEncryptionInfoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fileEncryptionInfo_ = value; onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(value); } bitField0_ |= 0x00004000; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public Builder setFileEncryptionInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = builderForValue.build(); onChanged(); } else { fileEncryptionInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00004000; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) { if (fileEncryptionInfoBuilder_ == null) { if (((bitField0_ & 0x00004000) == 0x00004000) && fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial(); } else { fileEncryptionInfo_ = value; } onChanged(); } else { fileEncryptionInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00004000; return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public Builder clearFileEncryptionInfo() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance(); onChanged(); } else { fileEncryptionInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00004000); return this; } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() { bitField0_ |= 0x00004000; onChanged(); return getFileEncryptionInfoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() { if (fileEncryptionInfoBuilder_ != null) { return fileEncryptionInfoBuilder_.getMessageOrBuilder(); } else { return fileEncryptionInfo_; } } /** * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15; * *
       * Optional field for file encryption
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> getFileEncryptionInfoFieldBuilder() { if (fileEncryptionInfoBuilder_ == null) { fileEncryptionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>( fileEncryptionInfo_, getParentForChildren(), isClean()); fileEncryptionInfo_ = null; } return fileEncryptionInfoBuilder_; } // optional uint32 storagePolicy = 16 [default = 0]; private int storagePolicy_ ; /** * optional uint32 storagePolicy = 16 [default = 0]; * *
       * block storage policy id
       * 
*/ public boolean hasStoragePolicy() { return ((bitField0_ & 0x00008000) == 0x00008000); } /** * optional uint32 storagePolicy = 16 [default = 0]; * *
       * block storage policy id
       * 
*/ public int getStoragePolicy() { return storagePolicy_; } /** * optional uint32 storagePolicy = 16 [default = 0]; * *
       * block storage policy id
       * 
*/ public Builder setStoragePolicy(int value) { bitField0_ |= 0x00008000; storagePolicy_ = value; onChanged(); return this; } /** * optional uint32 storagePolicy = 16 [default = 0]; * *
       * block storage policy id
       * 
*/ public Builder clearStoragePolicy() { bitField0_ = (bitField0_ & ~0x00008000); storagePolicy_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsFileStatusProto) } static { defaultInstance = new HdfsFileStatusProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsFileStatusProto) } public interface FsServerDefaultsProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 blockSize = 1; /** * required uint64 blockSize = 1; */ boolean hasBlockSize(); /** * required uint64 blockSize = 1; */ long getBlockSize(); // required uint32 bytesPerChecksum = 2; /** * required uint32 bytesPerChecksum = 2; */ boolean hasBytesPerChecksum(); /** * required uint32 bytesPerChecksum = 2; */ int getBytesPerChecksum(); // required uint32 writePacketSize = 3; /** * required uint32 writePacketSize = 3; */ boolean hasWritePacketSize(); /** * required uint32 writePacketSize = 3; */ int getWritePacketSize(); // required uint32 replication = 4; /** * required uint32 replication = 4; * *
     * Actually a short - only 16 bits used
     * 
*/ boolean hasReplication(); /** * required uint32 replication = 4; * *
     * Actually a short - only 16 bits used
     * 
*/ int getReplication(); // required uint32 fileBufferSize = 5; /** * required uint32 fileBufferSize = 5; */ boolean hasFileBufferSize(); /** * required uint32 fileBufferSize = 5; */ int getFileBufferSize(); // optional bool encryptDataTransfer = 6 [default = false]; /** * optional bool encryptDataTransfer = 6 [default = false]; */ boolean hasEncryptDataTransfer(); /** * optional bool encryptDataTransfer = 6 [default = false]; */ boolean getEncryptDataTransfer(); // optional uint64 trashInterval = 7 [default = 0]; /** * optional uint64 trashInterval = 7 [default = 0]; */ boolean hasTrashInterval(); /** * optional uint64 trashInterval = 7 [default = 0]; */ long getTrashInterval(); // optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ boolean hasChecksumType(); /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType(); } /** * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto} * *
   **
   * HDFS Server Defaults
   * 
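    *
    * A minimal construction sketch (not part of the generated source; the setter
    * names mirror the generated getters, and the values shown are arbitrary). The
    * five required fields must be set before build(); checksumType falls back to
    * its CHECKSUM_CRC32 default when left unset:
    *
    *   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto defaults =
    *       org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder()
    *           .setBlockSize(134217728L)     // 128 MiB
    *           .setBytesPerChecksum(512)
    *           .setWritePacketSize(65536)
    *           .setReplication(3)
    *           .setFileBufferSize(4096)
    *           .build();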
*/ public static final class FsServerDefaultsProto extends com.google.protobuf.GeneratedMessage implements FsServerDefaultsProtoOrBuilder { // Use FsServerDefaultsProto.newBuilder() to construct. private FsServerDefaultsProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FsServerDefaultsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FsServerDefaultsProto defaultInstance; public static FsServerDefaultsProto getDefaultInstance() { return defaultInstance; } public FsServerDefaultsProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FsServerDefaultsProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; blockSize_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; bytesPerChecksum_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; writePacketSize_ = input.readUInt32(); break; } case 32: { bitField0_ |= 0x00000008; replication_ = input.readUInt32(); break; } case 40: { bitField0_ |= 0x00000010; fileBufferSize_ = input.readUInt32(); break; } case 48: { bitField0_ |= 0x00000020; encryptDataTransfer_ = input.readBool(); break; } case 56: { bitField0_ |= 0x00000040; trashInterval_ = input.readUInt64(); break; } case 64: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(8, rawValue); } else { bitField0_ |= 0x00000080; checksumType_ = value; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public FsServerDefaultsProto parsePartialFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new FsServerDefaultsProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 blockSize = 1; public static final int BLOCKSIZE_FIELD_NUMBER = 1; private long blockSize_; /** * required uint64 blockSize = 1; */ public boolean hasBlockSize() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 blockSize = 1; */ public long getBlockSize() { return blockSize_; } // required uint32 bytesPerChecksum = 2; public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2; private int bytesPerChecksum_; /** * required uint32 bytesPerChecksum = 2; */ public boolean hasBytesPerChecksum() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 bytesPerChecksum = 2; */ public int getBytesPerChecksum() { return bytesPerChecksum_; } // required uint32 writePacketSize = 3; public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3; private int writePacketSize_; /** * required uint32 writePacketSize = 3; */ public boolean hasWritePacketSize() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 writePacketSize = 3; */ public int getWritePacketSize() { return writePacketSize_; } // required uint32 replication = 4; public static final int REPLICATION_FIELD_NUMBER = 4; private int replication_; /** * required uint32 replication = 4; * *
     * Actually a short - only 16 bits used
     * 
*/ public boolean hasReplication() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint32 replication = 4; * *
     * Actually a short - only 16 bits used
     * 
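     *
     * Editorial sketch: the generated getter returns an int even though only
     * 16 bits carry data, so a caller that needs the native width typically
     * narrows the value itself:
     *
     *   short replication = (short) proto.getReplication();  // proto assumed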
*/ public int getReplication() { return replication_; } // required uint32 fileBufferSize = 5; public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5; private int fileBufferSize_; /** * required uint32 fileBufferSize = 5; */ public boolean hasFileBufferSize() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint32 fileBufferSize = 5; */ public int getFileBufferSize() { return fileBufferSize_; } // optional bool encryptDataTransfer = 6 [default = false]; public static final int ENCRYPTDATATRANSFER_FIELD_NUMBER = 6; private boolean encryptDataTransfer_; /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean hasEncryptDataTransfer() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean getEncryptDataTransfer() { return encryptDataTransfer_; } // optional uint64 trashInterval = 7 [default = 0]; public static final int TRASHINTERVAL_FIELD_NUMBER = 7; private long trashInterval_; /** * optional uint64 trashInterval = 7 [default = 0]; */ public boolean hasTrashInterval() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint64 trashInterval = 7 [default = 0]; */ public long getTrashInterval() { return trashInterval_; } // optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; public static final int CHECKSUMTYPE_FIELD_NUMBER = 8; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto checksumType_; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public boolean hasChecksumType() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() { return checksumType_; } private void initFields() { blockSize_ = 0L; bytesPerChecksum_ = 0; writePacketSize_ = 0; replication_ = 0; fileBufferSize_ = 0; encryptDataTransfer_ = false; trashInterval_ = 0L; checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlockSize()) { memoizedIsInitialized = 0; return false; } if (!hasBytesPerChecksum()) { memoizedIsInitialized = 0; return false; } if (!hasWritePacketSize()) { memoizedIsInitialized = 0; return false; } if (!hasReplication()) { memoizedIsInitialized = 0; return false; } if (!hasFileBufferSize()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, blockSize_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, bytesPerChecksum_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(3, writePacketSize_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt32(4, replication_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt32(5, fileBufferSize_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBool(6, encryptDataTransfer_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt64(7, trashInterval_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { 
output.writeEnum(8, checksumType_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, blockSize_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, bytesPerChecksum_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(3, writePacketSize_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(4, replication_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(5, fileBufferSize_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(6, encryptDataTransfer_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(7, trashInterval_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(8, checksumType_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj; boolean result = true; result = result && (hasBlockSize() == other.hasBlockSize()); if (hasBlockSize()) { result = result && (getBlockSize() == other.getBlockSize()); } result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum()); if (hasBytesPerChecksum()) { result = result && (getBytesPerChecksum() == other.getBytesPerChecksum()); } result = result && (hasWritePacketSize() == other.hasWritePacketSize()); if (hasWritePacketSize()) { result = result && (getWritePacketSize() == other.getWritePacketSize()); } result = result && (hasReplication() == other.hasReplication()); if (hasReplication()) { result = result && (getReplication() == other.getReplication()); } result = result && (hasFileBufferSize() == other.hasFileBufferSize()); if (hasFileBufferSize()) { result = result && (getFileBufferSize() == other.getFileBufferSize()); } result = result && (hasEncryptDataTransfer() == other.hasEncryptDataTransfer()); if (hasEncryptDataTransfer()) { result = result && (getEncryptDataTransfer() == other.getEncryptDataTransfer()); } result = result && (hasTrashInterval() == other.hasTrashInterval()); if (hasTrashInterval()) { result = result && (getTrashInterval() == other.getTrashInterval()); } result = result && (hasChecksumType() == other.hasChecksumType()); if (hasChecksumType()) { result = result && (getChecksumType() == other.getChecksumType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if 
(memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlockSize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlockSize()); } if (hasBytesPerChecksum()) { hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER; hash = (53 * hash) + getBytesPerChecksum(); } if (hasWritePacketSize()) { hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER; hash = (53 * hash) + getWritePacketSize(); } if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } if (hasFileBufferSize()) { hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER; hash = (53 * hash) + getFileBufferSize(); } if (hasEncryptDataTransfer()) { hash = (37 * hash) + ENCRYPTDATATRANSFER_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getEncryptDataTransfer()); } if (hasTrashInterval()) { hash = (37 * hash) + TRASHINTERVAL_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTrashInterval()); } if (hasChecksumType()) { hash = (37 * hash) + CHECKSUMTYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getChecksumType()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto} * *
     **
     * HDFS Server Defaults
     * 
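     *
     * Builder sketch (editorial addition): all five required fields must be
     * set before build(), which otherwise throws an uninitialized-message
     * exception. The numeric values below are illustrative only:
     *
     *   HdfsProtos.FsServerDefaultsProto defaults =
     *       HdfsProtos.FsServerDefaultsProto.newBuilder()
     *           .setBlockSize(134217728L)   // required: 128 MiB, illustrative
     *           .setBytesPerChecksum(512)   // required
     *           .setWritePacketSize(65536)  // required
     *           .setReplication(3)          // required: must fit in 16 bits
     *           .setFileBufferSize(4096)    // required
     *           .setTrashInterval(0L)       // optional
     *           .build();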
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); blockSize_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); bytesPerChecksum_ = 0; bitField0_ = (bitField0_ & ~0x00000002); writePacketSize_ = 0; bitField0_ = (bitField0_ & ~0x00000004); replication_ = 0; bitField0_ = (bitField0_ & ~0x00000008); fileBufferSize_ = 0; bitField0_ = (bitField0_ & ~0x00000010); encryptDataTransfer_ = false; bitField0_ = (bitField0_ & ~0x00000020); trashInterval_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32; bitField0_ = (bitField0_ & ~0x00000080); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.blockSize_ = blockSize_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.bytesPerChecksum_ = bytesPerChecksum_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.writePacketSize_ = writePacketSize_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.replication_ = replication_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 
0x00000010; } result.fileBufferSize_ = fileBufferSize_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.encryptDataTransfer_ = encryptDataTransfer_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.trashInterval_ = trashInterval_; if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.checksumType_ = checksumType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this; if (other.hasBlockSize()) { setBlockSize(other.getBlockSize()); } if (other.hasBytesPerChecksum()) { setBytesPerChecksum(other.getBytesPerChecksum()); } if (other.hasWritePacketSize()) { setWritePacketSize(other.getWritePacketSize()); } if (other.hasReplication()) { setReplication(other.getReplication()); } if (other.hasFileBufferSize()) { setFileBufferSize(other.getFileBufferSize()); } if (other.hasEncryptDataTransfer()) { setEncryptDataTransfer(other.getEncryptDataTransfer()); } if (other.hasTrashInterval()) { setTrashInterval(other.getTrashInterval()); } if (other.hasChecksumType()) { setChecksumType(other.getChecksumType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlockSize()) { return false; } if (!hasBytesPerChecksum()) { return false; } if (!hasWritePacketSize()) { return false; } if (!hasReplication()) { return false; } if (!hasFileBufferSize()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 blockSize = 1; private long blockSize_ ; /** * required uint64 blockSize = 1; */ public boolean hasBlockSize() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 blockSize = 1; */ public long getBlockSize() { return blockSize_; } /** * required uint64 blockSize = 1; */ public Builder setBlockSize(long value) { bitField0_ |= 0x00000001; blockSize_ = value; onChanged(); return this; } /** * required uint64 blockSize = 1; */ public Builder clearBlockSize() { bitField0_ = (bitField0_ & ~0x00000001); blockSize_ = 0L; onChanged(); return this; } // required uint32 bytesPerChecksum = 2; private int bytesPerChecksum_ ; /** * required uint32 bytesPerChecksum = 2; */ public boolean hasBytesPerChecksum() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 bytesPerChecksum = 2; */ public int getBytesPerChecksum() { return bytesPerChecksum_; } /** * 
required uint32 bytesPerChecksum = 2; */ public Builder setBytesPerChecksum(int value) { bitField0_ |= 0x00000002; bytesPerChecksum_ = value; onChanged(); return this; } /** * required uint32 bytesPerChecksum = 2; */ public Builder clearBytesPerChecksum() { bitField0_ = (bitField0_ & ~0x00000002); bytesPerChecksum_ = 0; onChanged(); return this; } // required uint32 writePacketSize = 3; private int writePacketSize_ ; /** * required uint32 writePacketSize = 3; */ public boolean hasWritePacketSize() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 writePacketSize = 3; */ public int getWritePacketSize() { return writePacketSize_; } /** * required uint32 writePacketSize = 3; */ public Builder setWritePacketSize(int value) { bitField0_ |= 0x00000004; writePacketSize_ = value; onChanged(); return this; } /** * required uint32 writePacketSize = 3; */ public Builder clearWritePacketSize() { bitField0_ = (bitField0_ & ~0x00000004); writePacketSize_ = 0; onChanged(); return this; } // required uint32 replication = 4; private int replication_ ; /** * required uint32 replication = 4; * *
       * Actually a short - only 16 bits used
       * 
*/ public boolean hasReplication() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint32 replication = 4; * *
       * Actually a short - only 16 bits used
       * 
*/ public int getReplication() { return replication_; } /** * required uint32 replication = 4; * *
       * Actually a short - only 16 bits used
       * 
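       *
       * Editorial sketch: the setter itself performs no range check, so a
       * caller honoring the 16-bit contract may want an explicit guard:
       *
       *   if (value < 0 || value > 0xFFFF) {
       *       throw new IllegalArgumentException("replication exceeds 16 bits");
       *   }
       *   builder.setReplication(value);  // builder assumed in scope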
*/ public Builder setReplication(int value) { bitField0_ |= 0x00000008; replication_ = value; onChanged(); return this; } /** * required uint32 replication = 4; * *
       * Actually a short - only 16 bits used
       * 
*/ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000008); replication_ = 0; onChanged(); return this; } // required uint32 fileBufferSize = 5; private int fileBufferSize_ ; /** * required uint32 fileBufferSize = 5; */ public boolean hasFileBufferSize() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint32 fileBufferSize = 5; */ public int getFileBufferSize() { return fileBufferSize_; } /** * required uint32 fileBufferSize = 5; */ public Builder setFileBufferSize(int value) { bitField0_ |= 0x00000010; fileBufferSize_ = value; onChanged(); return this; } /** * required uint32 fileBufferSize = 5; */ public Builder clearFileBufferSize() { bitField0_ = (bitField0_ & ~0x00000010); fileBufferSize_ = 0; onChanged(); return this; } // optional bool encryptDataTransfer = 6 [default = false]; private boolean encryptDataTransfer_ ; /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean hasEncryptDataTransfer() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public boolean getEncryptDataTransfer() { return encryptDataTransfer_; } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public Builder setEncryptDataTransfer(boolean value) { bitField0_ |= 0x00000020; encryptDataTransfer_ = value; onChanged(); return this; } /** * optional bool encryptDataTransfer = 6 [default = false]; */ public Builder clearEncryptDataTransfer() { bitField0_ = (bitField0_ & ~0x00000020); encryptDataTransfer_ = false; onChanged(); return this; } // optional uint64 trashInterval = 7 [default = 0]; private long trashInterval_ ; /** * optional uint64 trashInterval = 7 [default = 0]; */ public boolean hasTrashInterval() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint64 trashInterval = 7 [default = 0]; */ public long getTrashInterval() { return trashInterval_; } /** * optional uint64 trashInterval = 7 [default = 0]; */ public Builder setTrashInterval(long value) { bitField0_ |= 0x00000040; trashInterval_ = value; onChanged(); return this; } /** * optional uint64 trashInterval = 7 [default = 0]; */ public Builder clearTrashInterval() { bitField0_ = (bitField0_ & ~0x00000040); trashInterval_ = 0L; onChanged(); return this; } // optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32; /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public boolean hasChecksumType() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() { return checksumType_; } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public Builder setChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000080; checksumType_ = value; onChanged(); return this; } /** * optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; */ public Builder clearChecksumType() { bitField0_ = (bitField0_ & ~0x00000080); checksumType_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsServerDefaultsProto) } static { defaultInstance = new FsServerDefaultsProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsServerDefaultsProto) } public interface DirectoryListingProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ int getPartialListingCount(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ java.util.List getPartialListingOrBuilderList(); /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index); // required uint32 remainingEntries = 2; /** * required uint32 remainingEntries = 2; */ boolean hasRemainingEntries(); /** * required uint32 remainingEntries = 2; */ int getRemainingEntries(); } /** * Protobuf type {@code hadoop.hdfs.DirectoryListingProto} * *
   **
   * Directory listing
   * 
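   *
   * Usage sketch (editorial addition): a listing arrives one page at a time;
   * a positive remainingEntries means further pages exist and the caller is
   * expected to request them. Only the read side is shown, and the listing
   * instance itself is assumed:
   *
   *   HdfsProtos.DirectoryListingProto page = ...;  // assumed: one response
   *   for (HdfsProtos.HdfsFileStatusProto status : page.getPartialListingList()) {
   *       // inspect each entry of this page
   *   }
   *   boolean morePages = page.getRemainingEntries() > 0;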
*/ public static final class DirectoryListingProto extends com.google.protobuf.GeneratedMessage implements DirectoryListingProtoOrBuilder { // Use DirectoryListingProto.newBuilder() to construct. private DirectoryListingProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DirectoryListingProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DirectoryListingProto defaultInstance; public static DirectoryListingProto getDefaultInstance() { return defaultInstance; } public DirectoryListingProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DirectoryListingProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { partialListing_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } partialListing_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry)); break; } case 16: { bitField0_ |= 0x00000001; remainingEntries_ = input.readUInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public DirectoryListingProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new DirectoryListingProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; public static final int PARTIALLISTING_FIELD_NUMBER = 1; private 
java.util.List partialListing_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { return partialListing_; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { return partialListing_.size(); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { return partialListing_.get(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { return partialListing_.get(index); } // required uint32 remainingEntries = 2; public static final int REMAININGENTRIES_FIELD_NUMBER = 2; private int remainingEntries_; /** * required uint32 remainingEntries = 2; */ public boolean hasRemainingEntries() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 remainingEntries = 2; */ public int getRemainingEntries() { return remainingEntries_; } private void initFields() { partialListing_ = java.util.Collections.emptyList(); remainingEntries_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasRemainingEntries()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < partialListing_.size(); i++) { output.writeMessage(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(2, remainingEntries_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < partialListing_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, partialListing_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, remainingEntries_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj; boolean result = true; result = result && getPartialListingList() .equals(other.getPartialListingList()); result = result && (hasRemainingEntries() == other.hasRemainingEntries()); if (hasRemainingEntries()) { result = result && (getRemainingEntries() == 
other.getRemainingEntries()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getPartialListingCount() > 0) { hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER; hash = (53 * hash) + getPartialListingList().hashCode(); } if (hasRemainingEntries()) { hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER; hash = (53 * hash) + getRemainingEntries(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DirectoryListingProto} * *
     **
     * Directory listing
     * 
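     *
     * Builder sketch (editorial addition): the repeated field is appended via
     * addPartialListing or addAllPartialListing; the nested status message is
     * assumed to have been built elsewhere:
     *
     *   HdfsProtos.DirectoryListingProto page =
     *       HdfsProtos.DirectoryListingProto.newBuilder()
     *           .addPartialListing(status)  // status: assumed HdfsFileStatusProto
     *           .setRemainingEntries(0)     // required
     *           .build();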
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getPartialListingFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { partialListingBuilder_.clear(); } remainingEntries_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (partialListingBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { partialListing_ = java.util.Collections.unmodifiableList(partialListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.partialListing_ = partialListing_; } else { result.partialListing_ = partialListingBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } result.remainingEntries_ = remainingEntries_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this; if (partialListingBuilder_ == null) { if (!other.partialListing_.isEmpty()) { if (partialListing_.isEmpty()) { partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePartialListingIsMutable(); partialListing_.addAll(other.partialListing_); } onChanged(); } } else { if (!other.partialListing_.isEmpty()) { if (partialListingBuilder_.isEmpty()) { partialListingBuilder_.dispose(); partialListingBuilder_ = null; partialListing_ = other.partialListing_; bitField0_ = (bitField0_ & ~0x00000001); partialListingBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getPartialListingFieldBuilder() : null; } else { partialListingBuilder_.addAllMessages(other.partialListing_); } } } if (other.hasRemainingEntries()) { setRemainingEntries(other.getRemainingEntries()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasRemainingEntries()) { return false; } for (int i = 0; i < getPartialListingCount(); i++) { if (!getPartialListing(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; private java.util.List partialListing_ = java.util.Collections.emptyList(); private void ensurePartialListingIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { partialListing_ = new java.util.ArrayList(partialListing_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_; /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingList() { if (partialListingBuilder_ == null) { return java.util.Collections.unmodifiableList(partialListing_); } else { return partialListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public int getPartialListingCount() { if (partialListingBuilder_ == null) { return partialListing_.size(); } else { return partialListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.set(index, value); onChanged(); } else { partialListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder setPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.set(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(value); onChanged(); } else { partialListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (partialListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePartialListingIsMutable(); partialListing_.add(index, value); onChanged(); } else { partialListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addPartialListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.add(index, builderForValue.build()); onChanged(); } else { partialListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder addAllPartialListing( java.lang.Iterable values) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); super.addAll(values, partialListing_); onChanged(); } else { partialListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder clearPartialListing() { if (partialListingBuilder_ == null) { partialListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { partialListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public Builder removePartialListing(int index) { if (partialListingBuilder_ == null) { ensurePartialListingIsMutable(); partialListing_.remove(index); onChanged(); } else { partialListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder( int index) { return getPartialListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( int index) { if (partialListingBuilder_ == null) { return partialListing_.get(index); } else { return partialListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingOrBuilderList() { if (partialListingBuilder_ != null) { return partialListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(partialListing_); } } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() { return getPartialListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder( int index) { return getPartialListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1; */ public java.util.List getPartialListingBuilderList() { return getPartialListingFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getPartialListingFieldBuilder() { if (partialListingBuilder_ == null) { partialListingBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( partialListing_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); partialListing_ = null; } return partialListingBuilder_; } // required uint32 remainingEntries = 2; private int remainingEntries_ ; /** * required uint32 remainingEntries = 2; */ public boolean hasRemainingEntries() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 remainingEntries = 2; */ public int getRemainingEntries() { return remainingEntries_; } /** * required uint32 remainingEntries = 2; */ public Builder setRemainingEntries(int value) { bitField0_ |= 0x00000002; remainingEntries_ = value; onChanged(); return this; } /** * required uint32 remainingEntries = 2; */ public Builder clearRemainingEntries() { bitField0_ = (bitField0_ & ~0x00000002); remainingEntries_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DirectoryListingProto) } static { defaultInstance = new DirectoryListingProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DirectoryListingProto) } public interface SnapshottableDirectoryStatusProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required 
.hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ boolean hasDirStatus(); /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus(); /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder(); // required uint32 snapshot_quota = 2; /** * required uint32 snapshot_quota = 2; * *
     * Fields specific to a snapshottable directory
     * 
*/ boolean hasSnapshotQuota(); /** * required uint32 snapshot_quota = 2; * *
     * Fields specific to a snapshottable directory
     * 
*/ int getSnapshotQuota(); // required uint32 snapshot_number = 3; /** * required uint32 snapshot_number = 3; */ boolean hasSnapshotNumber(); /** * required uint32 snapshot_number = 3; */ int getSnapshotNumber(); // required bytes parent_fullpath = 4; /** * required bytes parent_fullpath = 4; */ boolean hasParentFullpath(); /** * required bytes parent_fullpath = 4; */ com.google.protobuf.ByteString getParentFullpath(); } /** * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto} * *
   **
   * Status of a snapshottable directory: in addition to the normal directory
   * status information, it also includes the snapshot quota, the number of
   * snapshots, and the full path of the parent directory.
   * 
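   *
   * Usage sketch (editorial addition): parent_fullpath is raw bytes, so the
   * reader decodes it explicitly; UTF-8 is the assumption made here, and the
   * message instance itself is taken as given:
   *
   *   HdfsProtos.SnapshottableDirectoryStatusProto s = ...;  // assumed
   *   int snapshotsUsed = s.getSnapshotNumber();
   *   int snapshotQuota = s.getSnapshotQuota();
   *   String parentPath = s.getParentFullpath().toStringUtf8();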
*/ public static final class SnapshottableDirectoryStatusProto extends com.google.protobuf.GeneratedMessage implements SnapshottableDirectoryStatusProtoOrBuilder { // Use SnapshottableDirectoryStatusProto.newBuilder() to construct. private SnapshottableDirectoryStatusProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SnapshottableDirectoryStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SnapshottableDirectoryStatusProto defaultInstance; public static SnapshottableDirectoryStatusProto getDefaultInstance() { return defaultInstance; } public SnapshottableDirectoryStatusProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshottableDirectoryStatusProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = dirStatus_.toBuilder(); } dirStatus_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(dirStatus_); dirStatus_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; snapshotQuota_ = input.readUInt32(); break; } case 24: { bitField0_ |= 0x00000004; snapshotNumber_ = input.readUInt32(); break; } case 34: { bitField0_ |= 0x00000008; parentFullpath_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public SnapshottableDirectoryStatusProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return new SnapshottableDirectoryStatusProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; public static final int DIRSTATUS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { return dirStatus_; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { return dirStatus_; } // required uint32 snapshot_quota = 2; public static final int SNAPSHOT_QUOTA_FIELD_NUMBER = 2; private int snapshotQuota_; /** * required uint32 snapshot_quota = 2; * *
     * Fields specific for snapshottable directory
     * </pre>
*/ public boolean hasSnapshotQuota() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required uint32 snapshot_quota = 2;</code> * * <pre>
     * Fields specific for snapshottable directory
     * </pre>
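     *
     * <p>Presence-check sketch (assumes a parsed {@code status} instance):
     * proto2 exposes an explicit has-bit, so callers can guard the getter:
     * <pre>
     * int quota = status.hasSnapshotQuota() ? status.getSnapshotQuota() : 0;
     * </pre>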
*/ public int getSnapshotQuota() { return snapshotQuota_; } // required uint32 snapshot_number = 3; public static final int SNAPSHOT_NUMBER_FIELD_NUMBER = 3; private int snapshotNumber_; /** * required uint32 snapshot_number = 3; */ public boolean hasSnapshotNumber() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 snapshot_number = 3; */ public int getSnapshotNumber() { return snapshotNumber_; } // required bytes parent_fullpath = 4; public static final int PARENT_FULLPATH_FIELD_NUMBER = 4; private com.google.protobuf.ByteString parentFullpath_; /** * required bytes parent_fullpath = 4; */ public boolean hasParentFullpath() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes parent_fullpath = 4; */ public com.google.protobuf.ByteString getParentFullpath() { return parentFullpath_; } private void initFields() { dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); snapshotQuota_ = 0; snapshotNumber_ = 0; parentFullpath_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasDirStatus()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotQuota()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotNumber()) { memoizedIsInitialized = 0; return false; } if (!hasParentFullpath()) { memoizedIsInitialized = 0; return false; } if (!getDirStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, dirStatus_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, snapshotQuota_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(3, snapshotNumber_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, parentFullpath_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, dirStatus_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, snapshotQuota_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(3, snapshotNumber_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, parentFullpath_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) obj; 
boolean result = true; result = result && (hasDirStatus() == other.hasDirStatus()); if (hasDirStatus()) { result = result && getDirStatus() .equals(other.getDirStatus()); } result = result && (hasSnapshotQuota() == other.hasSnapshotQuota()); if (hasSnapshotQuota()) { result = result && (getSnapshotQuota() == other.getSnapshotQuota()); } result = result && (hasSnapshotNumber() == other.hasSnapshotNumber()); if (hasSnapshotNumber()) { result = result && (getSnapshotNumber() == other.getSnapshotNumber()); } result = result && (hasParentFullpath() == other.hasParentFullpath()); if (hasParentFullpath()) { result = result && getParentFullpath() .equals(other.getParentFullpath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasDirStatus()) { hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER; hash = (53 * hash) + getDirStatus().hashCode(); } if (hasSnapshotQuota()) { hash = (37 * hash) + SNAPSHOT_QUOTA_FIELD_NUMBER; hash = (53 * hash) + getSnapshotQuota(); } if (hasSnapshotNumber()) { hash = (37 * hash) + SNAPSHOT_NUMBER_FIELD_NUMBER; hash = (53 * hash) + getSnapshotNumber(); } if (hasParentFullpath()) { hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getParentFullpath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto} * *
     **
     * Status of a snapshottable directory: besides the normal information for 
     * a directory status, also include snapshot quota, number of snapshots, and
     * the full path of the parent directory. 
     * </pre>
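     *
     * <p>Modification sketch (assumes an existing {@code status} message):
     * besides {@code newBuilder()}, a builder can be obtained from a message
     * via {@code toBuilder()}:
     * <pre>
     * SnapshottableDirectoryStatusProto updated = status.toBuilder()
     *     .setSnapshotNumber(status.getSnapshotNumber() + 1)
     *     .build();
     * </pre>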
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDirStatusFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (dirStatusBuilder_ == null) { dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } else { dirStatusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); snapshotQuota_ = 0; bitField0_ = (bitField0_ & ~0x00000002); snapshotNumber_ = 0; bitField0_ = (bitField0_ & ~0x00000004); parentFullpath_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (dirStatusBuilder_ == null) { result.dirStatus_ = dirStatus_; } else { result.dirStatus_ = dirStatusBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.snapshotQuota_ = snapshotQuota_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.snapshotNumber_ = snapshotNumber_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { 
to_bitField0_ |= 0x00000008; } result.parentFullpath_ = parentFullpath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()) return this; if (other.hasDirStatus()) { mergeDirStatus(other.getDirStatus()); } if (other.hasSnapshotQuota()) { setSnapshotQuota(other.getSnapshotQuota()); } if (other.hasSnapshotNumber()) { setSnapshotNumber(other.getSnapshotNumber()); } if (other.hasParentFullpath()) { setParentFullpath(other.getParentFullpath()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasDirStatus()) { return false; } if (!hasSnapshotQuota()) { return false; } if (!hasSnapshotNumber()) { return false; } if (!hasParentFullpath()) { return false; } if (!getDirStatus().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_; /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public boolean hasDirStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() { if (dirStatusBuilder_ == null) { return dirStatus_; } else { return dirStatusBuilder_.getMessage(); } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dirStatus_ = value; onChanged(); } else { dirStatusBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder setDirStatus( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if 
(dirStatusBuilder_ == null) { dirStatus_ = builderForValue.build(); onChanged(); } else { dirStatusBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (dirStatusBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(dirStatus_).mergeFrom(value).buildPartial(); } else { dirStatus_ = value; } onChanged(); } else { dirStatusBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public Builder clearDirStatus() { if (dirStatusBuilder_ == null) { dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); onChanged(); } else { dirStatusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDirStatusFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() { if (dirStatusBuilder_ != null) { return dirStatusBuilder_.getMessageOrBuilder(); } else { return dirStatus_; } } /** * required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getDirStatusFieldBuilder() { if (dirStatusBuilder_ == null) { dirStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( dirStatus_, getParentForChildren(), isClean()); dirStatus_ = null; } return dirStatusBuilder_; } // required uint32 snapshot_quota = 2; private int snapshotQuota_ ; /** * required uint32 snapshot_quota = 2; * *
       * Fields specific for snapshottable directory
       * </pre>
*/ public boolean hasSnapshotQuota() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required uint32 snapshot_quota = 2;</code> * * <pre>
       * Fields specific for snapshottable directory
       * </pre>
*/ public int getSnapshotQuota() { return snapshotQuota_; } /** * <code>required uint32 snapshot_quota = 2;</code> * * <pre>
       * Fields specific for snapshottable directory
       * </pre>
*/ public Builder setSnapshotQuota(int value) { bitField0_ |= 0x00000002; snapshotQuota_ = value; onChanged(); return this; } /** * <code>required uint32 snapshot_quota = 2;</code> * * <pre>
       * Fields specific for snapshottable directory
       * </pre>
*/ public Builder clearSnapshotQuota() { bitField0_ = (bitField0_ & ~0x00000002); snapshotQuota_ = 0; onChanged(); return this; } // required uint32 snapshot_number = 3; private int snapshotNumber_ ; /** * required uint32 snapshot_number = 3; */ public boolean hasSnapshotNumber() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 snapshot_number = 3; */ public int getSnapshotNumber() { return snapshotNumber_; } /** * required uint32 snapshot_number = 3; */ public Builder setSnapshotNumber(int value) { bitField0_ |= 0x00000004; snapshotNumber_ = value; onChanged(); return this; } /** * required uint32 snapshot_number = 3; */ public Builder clearSnapshotNumber() { bitField0_ = (bitField0_ & ~0x00000004); snapshotNumber_ = 0; onChanged(); return this; } // required bytes parent_fullpath = 4; private com.google.protobuf.ByteString parentFullpath_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes parent_fullpath = 4; */ public boolean hasParentFullpath() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bytes parent_fullpath = 4; */ public com.google.protobuf.ByteString getParentFullpath() { return parentFullpath_; } /** * required bytes parent_fullpath = 4; */ public Builder setParentFullpath(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; parentFullpath_ = value; onChanged(); return this; } /** * required bytes parent_fullpath = 4; */ public Builder clearParentFullpath() { bitField0_ = (bitField0_ & ~0x00000008); parentFullpath_ = getDefaultInstance().getParentFullpath(); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto) } static { defaultInstance = new SnapshottableDirectoryStatusProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto) } public interface SnapshottableDirectoryListingProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ java.util.List getSnapshottableDirListingList(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ int getSnapshottableDirListingCount(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ java.util.List getSnapshottableDirListingOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto} * *
   **
   * Snapshottable directory listing
   * </pre>
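   *
   * <p>Parsing sketch (assumes {@code bytes} holds a serialized listing,
   * e.g. the payload of an RPC response):
   * <pre>
   * SnapshottableDirectoryListingProto listing =
   *     SnapshottableDirectoryListingProto.parseFrom(bytes);
   * for (SnapshottableDirectoryStatusProto dir
   *     : listing.getSnapshottableDirListingList()) {
   *   // each entry carries a dirStatus, quota, snapshot count and parent path
   * }
   * </pre>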
*/ public static final class SnapshottableDirectoryListingProto extends com.google.protobuf.GeneratedMessage implements SnapshottableDirectoryListingProtoOrBuilder { // Use SnapshottableDirectoryListingProto.newBuilder() to construct. private SnapshottableDirectoryListingProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SnapshottableDirectoryListingProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SnapshottableDirectoryListingProto defaultInstance; public static SnapshottableDirectoryListingProto getDefaultInstance() { return defaultInstance; } public SnapshottableDirectoryListingProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshottableDirectoryListingProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { snapshottableDirListing_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } snapshottableDirListing_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public SnapshottableDirectoryListingProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new SnapshottableDirectoryListingProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; 
} // repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; public static final int SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER = 1; private java.util.List snapshottableDirListing_; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingList() { return snapshottableDirListing_; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingOrBuilderList() { return snapshottableDirListing_; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public int getSnapshottableDirListingCount() { return snapshottableDirListing_.size(); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) { return snapshottableDirListing_.get(index); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index) { return snapshottableDirListing_.get(index); } private void initFields() { snapshottableDirListing_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getSnapshottableDirListingCount(); i++) { if (!getSnapshottableDirListing(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < snapshottableDirListing_.size(); i++) { output.writeMessage(1, snapshottableDirListing_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < snapshottableDirListing_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, snapshottableDirListing_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) obj; boolean result = true; result = result && getSnapshottableDirListingList() .equals(other.getSnapshottableDirListingList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getSnapshottableDirListingCount() > 0) { hash 
= (37 * hash) + SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER; hash = (53 * hash) + getSnapshottableDirListingList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto} * *
     **
     * Snapshottable directory listing
     * </pre>
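     *
     * <p>Assembly sketch (assumes {@code status} entries built as shown for
     * SnapshottableDirectoryStatusProto): the repeated field grows one
     * message at a time:
     * <pre>
     * SnapshottableDirectoryListingProto listing =
     *     SnapshottableDirectoryListingProto.newBuilder()
     *         .addSnapshottableDirListing(status)
     *         .build();
     * </pre>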
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSnapshottableDirListingFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (snapshottableDirListingBuilder_ == null) { snapshottableDirListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { snapshottableDirListingBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(this); int from_bitField0_ = bitField0_; if (snapshottableDirListingBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_); bitField0_ = (bitField0_ & ~0x00000001); } result.snapshottableDirListing_ = snapshottableDirListing_; } else { result.snapshottableDirListing_ = snapshottableDirListingBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)other); } else { super.mergeFrom(other); return this; } } public 
Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance()) return this; if (snapshottableDirListingBuilder_ == null) { if (!other.snapshottableDirListing_.isEmpty()) { if (snapshottableDirListing_.isEmpty()) { snapshottableDirListing_ = other.snapshottableDirListing_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.addAll(other.snapshottableDirListing_); } onChanged(); } } else { if (!other.snapshottableDirListing_.isEmpty()) { if (snapshottableDirListingBuilder_.isEmpty()) { snapshottableDirListingBuilder_.dispose(); snapshottableDirListingBuilder_ = null; snapshottableDirListing_ = other.snapshottableDirListing_; bitField0_ = (bitField0_ & ~0x00000001); snapshottableDirListingBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getSnapshottableDirListingFieldBuilder() : null; } else { snapshottableDirListingBuilder_.addAllMessages(other.snapshottableDirListing_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getSnapshottableDirListingCount(); i++) { if (!getSnapshottableDirListing(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; private java.util.List snapshottableDirListing_ = java.util.Collections.emptyList(); private void ensureSnapshottableDirListingIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { snapshottableDirListing_ = new java.util.ArrayList(snapshottableDirListing_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> snapshottableDirListingBuilder_; /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingList() { if (snapshottableDirListingBuilder_ == null) { return java.util.Collections.unmodifiableList(snapshottableDirListing_); } else { return snapshottableDirListingBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public int getSnapshottableDirListingCount() { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.size(); } else { return snapshottableDirListingBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.get(index); } else { return snapshottableDirListingBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder setSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.set(index, value); onChanged(); } else { snapshottableDirListingBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder setSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.set(index, builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(value); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) { if (snapshottableDirListingBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(index, value); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addSnapshottableDirListing( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.add(index, builderForValue.build()); onChanged(); } else { snapshottableDirListingBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder addAllSnapshottableDirListing( java.lang.Iterable values) 
{ if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); super.addAll(values, snapshottableDirListing_); onChanged(); } else { snapshottableDirListingBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder clearSnapshottableDirListing() { if (snapshottableDirListingBuilder_ == null) { snapshottableDirListing_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { snapshottableDirListingBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public Builder removeSnapshottableDirListing(int index) { if (snapshottableDirListingBuilder_ == null) { ensureSnapshottableDirListingIsMutable(); snapshottableDirListing_.remove(index); onChanged(); } else { snapshottableDirListingBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder getSnapshottableDirListingBuilder( int index) { return getSnapshottableDirListingFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder( int index) { if (snapshottableDirListingBuilder_ == null) { return snapshottableDirListing_.get(index); } else { return snapshottableDirListingBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingOrBuilderList() { if (snapshottableDirListingBuilder_ != null) { return snapshottableDirListingBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(snapshottableDirListing_); } } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder() { return getSnapshottableDirListingFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder( int index) { return getSnapshottableDirListingFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1; */ public java.util.List getSnapshottableDirListingBuilderList() { return getSnapshottableDirListingFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> getSnapshottableDirListingFieldBuilder() { if (snapshottableDirListingBuilder_ == null) { snapshottableDirListingBuilder_ = new 
com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>( snapshottableDirListing_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); snapshottableDirListing_ = null; } return snapshottableDirListingBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryListingProto) } static { defaultInstance = new SnapshottableDirectoryListingProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryListingProto) } public interface SnapshotDiffReportEntryProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required bytes fullpath = 1; /** * required bytes fullpath = 1; */ boolean hasFullpath(); /** * required bytes fullpath = 1; */ com.google.protobuf.ByteString getFullpath(); // required string modificationLabel = 2; /** * required string modificationLabel = 2; */ boolean hasModificationLabel(); /** * required string modificationLabel = 2; */ java.lang.String getModificationLabel(); /** * required string modificationLabel = 2; */ com.google.protobuf.ByteString getModificationLabelBytes(); // optional bytes targetPath = 3; /** * optional bytes targetPath = 3; */ boolean hasTargetPath(); /** * optional bytes targetPath = 3; */ com.google.protobuf.ByteString getTargetPath(); } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto} * *
   **
   * Snapshot diff report entry
   * </pre>
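   *
   * <p>Round-trip sketch (path and label values are illustrative;
   * {@code fullpath} and {@code modificationLabel} are required, while
   * {@code targetPath} is optional and typically set only for renames):
   * <pre>
   * SnapshotDiffReportEntryProto entry =
   *     SnapshotDiffReportEntryProto.newBuilder()
   *         .setFullpath(com.google.protobuf.ByteString.copyFromUtf8("/d/f"))
   *         .setModificationLabel("M")
   *         .build();
   * byte[] wire = entry.toByteArray();
   * SnapshotDiffReportEntryProto back =
   *     SnapshotDiffReportEntryProto.parseFrom(wire);
   * </pre>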
*/ public static final class SnapshotDiffReportEntryProto extends com.google.protobuf.GeneratedMessage implements SnapshotDiffReportEntryProtoOrBuilder { // Use SnapshotDiffReportEntryProto.newBuilder() to construct. private SnapshotDiffReportEntryProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SnapshotDiffReportEntryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SnapshotDiffReportEntryProto defaultInstance; public static SnapshotDiffReportEntryProto getDefaultInstance() { return defaultInstance; } public SnapshotDiffReportEntryProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportEntryProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; fullpath_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; modificationLabel_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; targetPath_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public SnapshotDiffReportEntryProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportEntryProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bytes fullpath = 1; public static final int FULLPATH_FIELD_NUMBER = 1; private com.google.protobuf.ByteString fullpath_; /** * required bytes fullpath = 1; */ public boolean hasFullpath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bytes fullpath = 1; */ public 
com.google.protobuf.ByteString getFullpath() { return fullpath_; } // required string modificationLabel = 2; public static final int MODIFICATIONLABEL_FIELD_NUMBER = 2; private java.lang.Object modificationLabel_; /** * required string modificationLabel = 2; */ public boolean hasModificationLabel() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string modificationLabel = 2; */ public java.lang.String getModificationLabel() { java.lang.Object ref = modificationLabel_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { modificationLabel_ = s; } return s; } } /** * required string modificationLabel = 2; */ public com.google.protobuf.ByteString getModificationLabelBytes() { java.lang.Object ref = modificationLabel_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); modificationLabel_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // optional bytes targetPath = 3; public static final int TARGETPATH_FIELD_NUMBER = 3; private com.google.protobuf.ByteString targetPath_; /** * optional bytes targetPath = 3; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes targetPath = 3; */ public com.google.protobuf.ByteString getTargetPath() { return targetPath_; } private void initFields() { fullpath_ = com.google.protobuf.ByteString.EMPTY; modificationLabel_ = ""; targetPath_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasFullpath()) { memoizedIsInitialized = 0; return false; } if (!hasModificationLabel()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, fullpath_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getModificationLabelBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, targetPath_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, fullpath_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getModificationLabelBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, targetPath_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) obj; boolean result = true; result = result && (hasFullpath() == other.hasFullpath()); if (hasFullpath()) { result = result && getFullpath() .equals(other.getFullpath()); } result = result && (hasModificationLabel() == other.hasModificationLabel()); if (hasModificationLabel()) { result = result && getModificationLabel() .equals(other.getModificationLabel()); } result = result && (hasTargetPath() == other.hasTargetPath()); if (hasTargetPath()) { result = result && getTargetPath() .equals(other.getTargetPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFullpath()) { hash = (37 * hash) + FULLPATH_FIELD_NUMBER; hash = (53 * hash) + getFullpath().hashCode(); } if (hasModificationLabel()) { hash = (37 * hash) + MODIFICATIONLABEL_FIELD_NUMBER; hash = (53 * hash) + getModificationLabel().hashCode(); } if (hasTargetPath()) { hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; hash = (53 * hash) + getTargetPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto 
parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto} * *
      * <pre>
      **
      * Snapshot diff report entry
      * </pre>
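      *
      * A minimal usage sketch (hypothetical path and label values; build()
      * enforces the required fullpath and modificationLabel fields, as the
      * generated isInitialized() check shows):
      *
      * <pre>
      * SnapshotDiffReportEntryProto entry = SnapshotDiffReportEntryProto.newBuilder()
      *     .setFullpath(com.google.protobuf.ByteString.copyFromUtf8("/dir/file1"))
      *     .setModificationLabel("M")
      *     .build();
      * byte[] wire = entry.toByteArray();
      * SnapshotDiffReportEntryProto parsed = SnapshotDiffReportEntryProto.parseFrom(wire);
      * </pre>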
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); fullpath_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); modificationLabel_ = ""; bitField0_ = (bitField0_ & ~0x00000002); targetPath_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.fullpath_ = fullpath_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.modificationLabel_ = modificationLabel_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.targetPath_ = targetPath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()) return this; if (other.hasFullpath()) { setFullpath(other.getFullpath()); } if (other.hasModificationLabel()) { bitField0_ |= 0x00000002; modificationLabel_ = other.modificationLabel_; onChanged(); } if (other.hasTargetPath()) { setTargetPath(other.getTargetPath()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasFullpath()) { return false; } if (!hasModificationLabel()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bytes fullpath = 1; private com.google.protobuf.ByteString fullpath_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes fullpath = 1; */ public boolean hasFullpath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bytes fullpath = 1; */ public com.google.protobuf.ByteString getFullpath() { return fullpath_; } /** * required bytes fullpath = 1; */ public Builder setFullpath(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; fullpath_ = value; onChanged(); return this; } /** * required bytes fullpath = 1; */ public Builder clearFullpath() { bitField0_ = (bitField0_ & ~0x00000001); fullpath_ = getDefaultInstance().getFullpath(); onChanged(); return this; } // required string modificationLabel = 2; private java.lang.Object modificationLabel_ = ""; /** * required string modificationLabel = 2; */ public boolean hasModificationLabel() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string modificationLabel = 2; */ public java.lang.String getModificationLabel() { java.lang.Object ref = modificationLabel_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); modificationLabel_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string modificationLabel = 2; */ public com.google.protobuf.ByteString getModificationLabelBytes() { java.lang.Object ref = modificationLabel_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); modificationLabel_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string modificationLabel = 2; */ public Builder setModificationLabel( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; modificationLabel_ = value; onChanged(); return this; } /** * required string modificationLabel = 2; */ public Builder clearModificationLabel() { bitField0_ = (bitField0_ & ~0x00000002); modificationLabel_ = getDefaultInstance().getModificationLabel(); onChanged(); return this; } /** * required string 
modificationLabel = 2; */ public Builder setModificationLabelBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; modificationLabel_ = value; onChanged(); return this; } // optional bytes targetPath = 3; private com.google.protobuf.ByteString targetPath_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes targetPath = 3; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes targetPath = 3; */ public com.google.protobuf.ByteString getTargetPath() { return targetPath_; } /** * optional bytes targetPath = 3; */ public Builder setTargetPath(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; targetPath_ = value; onChanged(); return this; } /** * optional bytes targetPath = 3; */ public Builder clearTargetPath() { bitField0_ = (bitField0_ & ~0x00000004); targetPath_ = getDefaultInstance().getTargetPath(); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportEntryProto) } static { defaultInstance = new SnapshotDiffReportEntryProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportEntryProto) } public interface SnapshotDiffReportProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; * *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
*/ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; * *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
*/ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; * *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
*/ com.google.protobuf.ByteString getSnapshotRootBytes(); // required string fromSnapshot = 2; /** * required string fromSnapshot = 2; */ boolean hasFromSnapshot(); /** * required string fromSnapshot = 2; */ java.lang.String getFromSnapshot(); /** * required string fromSnapshot = 2; */ com.google.protobuf.ByteString getFromSnapshotBytes(); // required string toSnapshot = 3; /** * required string toSnapshot = 3; */ boolean hasToSnapshot(); /** * required string toSnapshot = 3; */ java.lang.String getToSnapshot(); /** * required string toSnapshot = 3; */ com.google.protobuf.ByteString getToSnapshotBytes(); // repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ java.util.List getDiffReportEntriesList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ int getDiffReportEntriesCount(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ java.util.List getDiffReportEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto} * *
    * <pre>
    **
    * Snapshot diff report
    * </pre>
*/ public static final class SnapshotDiffReportProto extends com.google.protobuf.GeneratedMessage implements SnapshotDiffReportProtoOrBuilder { // Use SnapshotDiffReportProto.newBuilder() to construct. private SnapshotDiffReportProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SnapshotDiffReportProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SnapshotDiffReportProto defaultInstance; public static SnapshotDiffReportProto getDefaultInstance() { return defaultInstance; } public SnapshotDiffReportProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotDiffReportProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; fromSnapshot_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; toSnapshot_ = input.readBytes(); break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { diffReportEntries_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } diffReportEntries_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public SnapshotDiffReportProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new SnapshotDiffReportProto(input, extensionRegistry); } }; @java.lang.Override public 
com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; * *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
*/ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; * *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
*/ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; * *
      * <pre>
      * full path of the directory where snapshots were taken
      * </pre>
*/ public com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string fromSnapshot = 2; public static final int FROMSNAPSHOT_FIELD_NUMBER = 2; private java.lang.Object fromSnapshot_; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } } /** * required string fromSnapshot = 2; */ public com.google.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string toSnapshot = 3; public static final int TOSNAPSHOT_FIELD_NUMBER = 3; private java.lang.Object toSnapshot_; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } } /** * required string toSnapshot = 3; */ public com.google.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; public static final int DIFFREPORTENTRIES_FIELD_NUMBER = 4; private java.util.List diffReportEntries_; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesList() { return diffReportEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesOrBuilderList() { return diffReportEntries_; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public int getDiffReportEntriesCount() { return diffReportEntries_.size(); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) { return diffReportEntries_.get(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index) { return diffReportEntries_.get(index); } private void initFields() { 
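    /* initFields() below resets every field to its proto default: empty
     * strings for the three required fields and an empty list for the
     * repeated entries. Sketch (hypothetical values) of the related
     * validation behavior visible in this class: build() throws
     * newUninitializedMessageException(...) when a required field is unset,
     * while buildPartial() skips that check:
     *
     *   SnapshotDiffReportProto partial = SnapshotDiffReportProto.newBuilder()
     *       .setSnapshotRoot("/user/data")  // fromSnapshot and toSnapshot unset
     *       .buildPartial();                // returns; partial.isInitialized() is false
     */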
snapshotRoot_ = ""; fromSnapshot_ = ""; toSnapshot_ = ""; diffReportEntries_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasFromSnapshot()) { memoizedIsInitialized = 0; return false; } if (!hasToSnapshot()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getDiffReportEntriesCount(); i++) { if (!getDiffReportEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getFromSnapshotBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getToSnapshotBytes()); } for (int i = 0; i < diffReportEntries_.size(); i++) { output.writeMessage(4, diffReportEntries_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getFromSnapshotBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getToSnapshotBytes()); } for (int i = 0; i < diffReportEntries_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, diffReportEntries_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasFromSnapshot() == other.hasFromSnapshot()); if (hasFromSnapshot()) { result = result && getFromSnapshot() .equals(other.getFromSnapshot()); } result = result && (hasToSnapshot() == other.hasToSnapshot()); if (hasToSnapshot()) { result = result && getToSnapshot() .equals(other.getToSnapshot()); } result = result && getDiffReportEntriesList() .equals(other.getDiffReportEntriesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash 
= (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasFromSnapshot()) { hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getFromSnapshot().hashCode(); } if (hasToSnapshot()) { hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getToSnapshot().hashCode(); } if (getDiffReportEntriesCount() > 0) { hash = (37 * hash) + DIFFREPORTENTRIES_FIELD_NUMBER; hash = (53 * hash) + getDiffReportEntriesList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return 
builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto} * *
      * <pre>
      **
      * Snapshot diff report
      * </pre>
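      *
      * A minimal usage sketch (hypothetical names; {@code entry} stands for a
      * previously built SnapshotDiffReportEntryProto):
      *
      * <pre>
      * SnapshotDiffReportProto report = SnapshotDiffReportProto.newBuilder()
      *     .setSnapshotRoot("/user/data")
      *     .setFromSnapshot("s1")
      *     .setToSnapshot("s2")
      *     .addDiffReportEntries(entry)
      *     .build();
      * </pre>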
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDiffReportEntriesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); fromSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000002); toSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000004); if (diffReportEntriesBuilder_ == null) { diffReportEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { diffReportEntriesBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.fromSnapshot_ = fromSnapshot_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.toSnapshot_ = toSnapshot_; if (diffReportEntriesBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008)) { diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_); bitField0_ = (bitField0_ & ~0x00000008); } result.diffReportEntries_ = diffReportEntries_; } else { result.diffReportEntries_ = diffReportEntriesBuilder_.build(); } result.bitField0_ = 
to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasFromSnapshot()) { bitField0_ |= 0x00000002; fromSnapshot_ = other.fromSnapshot_; onChanged(); } if (other.hasToSnapshot()) { bitField0_ |= 0x00000004; toSnapshot_ = other.toSnapshot_; onChanged(); } if (diffReportEntriesBuilder_ == null) { if (!other.diffReportEntries_.isEmpty()) { if (diffReportEntries_.isEmpty()) { diffReportEntries_ = other.diffReportEntries_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureDiffReportEntriesIsMutable(); diffReportEntries_.addAll(other.diffReportEntries_); } onChanged(); } } else { if (!other.diffReportEntries_.isEmpty()) { if (diffReportEntriesBuilder_.isEmpty()) { diffReportEntriesBuilder_.dispose(); diffReportEntriesBuilder_ = null; diffReportEntries_ = other.diffReportEntries_; bitField0_ = (bitField0_ & ~0x00000008); diffReportEntriesBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getDiffReportEntriesFieldBuilder() : null; } else { diffReportEntriesBuilder_.addAllMessages(other.diffReportEntries_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasFromSnapshot()) { return false; } if (!hasToSnapshot()) { return false; } for (int i = 0; i < getDiffReportEntriesCount(); i++) { if (!getDiffReportEntries(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; * *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
*/ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; * *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
*/ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; * *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
*/ public com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; * *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
*/ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; * *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
*/ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; * *
        * <pre>
        * full path of the directory where snapshots were taken
        * </pre>
*/ public Builder setSnapshotRootBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // required string fromSnapshot = 2; private java.lang.Object fromSnapshot_ = ""; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); fromSnapshot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string fromSnapshot = 2; */ public com.google.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder clearFromSnapshot() { bitField0_ = (bitField0_ & ~0x00000002); fromSnapshot_ = getDefaultInstance().getFromSnapshot(); onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshotBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } // required string toSnapshot = 3; private java.lang.Object toSnapshot_ = ""; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); toSnapshot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string toSnapshot = 3; */ public com.google.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string toSnapshot = 3; */ public Builder setToSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder clearToSnapshot() { bitField0_ = (bitField0_ & ~0x00000004); toSnapshot_ = getDefaultInstance().getToSnapshot(); onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder setToSnapshotBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } // repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; private java.util.List diffReportEntries_ = java.util.Collections.emptyList(); private void ensureDiffReportEntriesIsMutable() { if 
(!((bitField0_ & 0x00000008) == 0x00000008)) { diffReportEntries_ = new java.util.ArrayList(diffReportEntries_); bitField0_ |= 0x00000008; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> diffReportEntriesBuilder_; /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesList() { if (diffReportEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(diffReportEntries_); } else { return diffReportEntriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public int getDiffReportEntriesCount() { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.size(); } else { return diffReportEntriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.get(index); } else { return diffReportEntriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder setDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.set(index, value); onChanged(); } else { diffReportEntriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder setDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.set(index, builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(value); onChanged(); } else { diffReportEntriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) { if (diffReportEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(index, value); onChanged(); } else { diffReportEntriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { 
ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addDiffReportEntries( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.add(index, builderForValue.build()); onChanged(); } else { diffReportEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder addAllDiffReportEntries( java.lang.Iterable values) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); super.addAll(values, diffReportEntries_); onChanged(); } else { diffReportEntriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder clearDiffReportEntries() { if (diffReportEntriesBuilder_ == null) { diffReportEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { diffReportEntriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public Builder removeDiffReportEntries(int index) { if (diffReportEntriesBuilder_ == null) { ensureDiffReportEntriesIsMutable(); diffReportEntries_.remove(index); onChanged(); } else { diffReportEntriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder getDiffReportEntriesBuilder( int index) { return getDiffReportEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder( int index) { if (diffReportEntriesBuilder_ == null) { return diffReportEntries_.get(index); } else { return diffReportEntriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesOrBuilderList() { if (diffReportEntriesBuilder_ != null) { return diffReportEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(diffReportEntries_); } } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder() { return getDiffReportEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder( int index) { return getDiffReportEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4; */ public java.util.List getDiffReportEntriesBuilderList() { 
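    /* Sketch of editing a repeated field in place through its child builders
     * (hypothetical values; index 0 assumes the source report already holds
     * at least one entry). Once any of the *Builder accessors is used, the
     * plain java.util.List storage is handed off to the RepeatedFieldBuilder
     * created below, and child builders write through to this parent:
     *
     *   SnapshotDiffReportProto.Builder b = report.toBuilder();
     *   b.getDiffReportEntriesBuilder(0).setModificationLabel("R");
     *   SnapshotDiffReportProto updated = b.build();
     */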
return getDiffReportEntriesFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> getDiffReportEntriesFieldBuilder() { if (diffReportEntriesBuilder_ == null) { diffReportEntriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>( diffReportEntries_, ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); diffReportEntries_ = null; } return diffReportEntriesBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportProto) } static { defaultInstance = new SnapshotDiffReportProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportProto) } public interface StorageInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 layoutVersion = 1; /** * required uint32 layoutVersion = 1; * *
      * <pre>
      * Layout version of the file system
      * </pre>
*/ boolean hasLayoutVersion(); /** * required uint32 layoutVersion = 1; * *
      * <pre>
      * Layout version of the file system
      * </pre>
*/ int getLayoutVersion(); // required uint32 namespceID = 2; /** * required uint32 namespceID = 2; * *
      * <pre>
      * File system namespace ID
      * </pre>
*/ boolean hasNamespceID(); /** * required uint32 namespceID = 2; * *
      * <pre>
      * File system namespace ID
      * </pre>
*/ int getNamespceID(); // required string clusterID = 3; /** * required string clusterID = 3; * *
      * <pre>
      * ID of the cluster
      * </pre>
*/ boolean hasClusterID(); /** * required string clusterID = 3; * *
      * <pre>
      * ID of the cluster
      * </pre>
*/ java.lang.String getClusterID(); /** * required string clusterID = 3; * *
      * <pre>
      * ID of the cluster
      * </pre>
*/ com.google.protobuf.ByteString getClusterIDBytes(); // required uint64 cTime = 4; /** * required uint64 cTime = 4; * *
      * <pre>
      * File system creation time
      * </pre>
*/ boolean hasCTime(); /** * required uint64 cTime = 4; * *
      * <pre>
      * File system creation time
      * </pre>
*/ long getCTime(); } /** * Protobuf type {@code hadoop.hdfs.StorageInfoProto} * *
    * <pre>
    **
    * Common node information shared by all the nodes in the cluster
    * </pre>
*/ public static final class StorageInfoProto extends com.google.protobuf.GeneratedMessage implements StorageInfoProtoOrBuilder { // Use StorageInfoProto.newBuilder() to construct. private StorageInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StorageInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StorageInfoProto defaultInstance; public static StorageInfoProto getDefaultInstance() { return defaultInstance; } public StorageInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StorageInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; layoutVersion_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; namespceID_ = input.readUInt32(); break; } case 26: { bitField0_ |= 0x00000004; clusterID_ = input.readBytes(); break; } case 32: { bitField0_ |= 0x00000008; cTime_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public StorageInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StorageInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint32 layoutVersion = 1; public static final int LAYOUTVERSION_FIELD_NUMBER = 1; private int layoutVersion_; /** * required uint32 layoutVersion = 1; * *
      * <pre>
      * Layout version of the file system
      * </pre>
*/ public boolean hasLayoutVersion() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 layoutVersion = 1; * *
      * <pre>
      * Layout version of the file system
      * </pre>
*/ public int getLayoutVersion() { return layoutVersion_; } // required uint32 namespceID = 2; public static final int NAMESPCEID_FIELD_NUMBER = 2; private int namespceID_; /** * required uint32 namespceID = 2; * *
      * <pre>
      * File system namespace ID
      * </pre>
*/ public boolean hasNamespceID() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 namespceID = 2; * *
      * <pre>
      * File system namespace ID
      * </pre>
*/ public int getNamespceID() { return namespceID_; } // required string clusterID = 3; public static final int CLUSTERID_FIELD_NUMBER = 3; private java.lang.Object clusterID_; /** * required string clusterID = 3; * *
      * <pre>
      * ID of the cluster
      * </pre>
*/ public boolean hasClusterID() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clusterID = 3; * *
      * <pre>
      * ID of the cluster
      * </pre>
*/ public java.lang.String getClusterID() { java.lang.Object ref = clusterID_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clusterID_ = s; } return s; } } /** * required string clusterID = 3; * *
      * <pre>
      * ID of the cluster
      * </pre>
*/ public com.google.protobuf.ByteString getClusterIDBytes() { java.lang.Object ref = clusterID_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clusterID_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint64 cTime = 4; public static final int CTIME_FIELD_NUMBER = 4; private long cTime_; /** * required uint64 cTime = 4; * *
     * File system creation time
      *
*/ public boolean hasCTime() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 cTime = 4; * *
     * File system creation time
      *
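      * A read-side sketch (editor's addition, not generated code; rawBytes
      * is an illustrative variable):
      *
      *   StorageInfoProto info = StorageInfoProto.parseFrom(rawBytes);
      *   // All four fields are required, so parseFrom() rejects a message
      *   // that lacks any of them; the accessors are then safe to call.
      *   long created = info.getCTime();
      *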
*/ public long getCTime() { return cTime_; } private void initFields() { layoutVersion_ = 0; namespceID_ = 0; clusterID_ = ""; cTime_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLayoutVersion()) { memoizedIsInitialized = 0; return false; } if (!hasNamespceID()) { memoizedIsInitialized = 0; return false; } if (!hasClusterID()) { memoizedIsInitialized = 0; return false; } if (!hasCTime()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, layoutVersion_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, namespceID_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getClusterIDBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, cTime_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, layoutVersion_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, namespceID_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getClusterIDBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(4, cTime_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) obj; boolean result = true; result = result && (hasLayoutVersion() == other.hasLayoutVersion()); if (hasLayoutVersion()) { result = result && (getLayoutVersion() == other.getLayoutVersion()); } result = result && (hasNamespceID() == other.hasNamespceID()); if (hasNamespceID()) { result = result && (getNamespceID() == other.getNamespceID()); } result = result && (hasClusterID() == other.hasClusterID()); if (hasClusterID()) { result = result && getClusterID() .equals(other.getClusterID()); } result = result && (hasCTime() == other.hasCTime()); if (hasCTime()) { result = result && (getCTime() == other.getCTime()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLayoutVersion()) { hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER; hash = (53 * hash) + getLayoutVersion(); } if (hasNamespceID()) { hash = (37 * hash) + 
NAMESPCEID_FIELD_NUMBER; hash = (53 * hash) + getNamespceID(); } if (hasClusterID()) { hash = (37 * hash) + CLUSTERID_FIELD_NUMBER; hash = (53 * hash) + getClusterID().hashCode(); } if (hasCTime()) { hash = (37 * hash) + CTIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCTime()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.StorageInfoProto} * *
     **
     * Common node information shared by all the nodes in the cluster
      *
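      * A construction sketch (editor's addition, not part of the generated
      * source; the field values are illustrative):
      *
      *   StorageInfoProto info = StorageInfoProto.newBuilder()
      *       .setLayoutVersion(1)
      *       .setNamespceID(42)
      *       .setClusterID("example-cluster")
      *       .setCTime(0L)
      *       .build();
      *
      * All four fields are required: build() throws if any is unset,
      * whereas buildPartial() skips that check.
      *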
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); layoutVersion_ = 0; bitField0_ = (bitField0_ & ~0x00000001); namespceID_ = 0; bitField0_ = (bitField0_ & ~0x00000002); clusterID_ = ""; bitField0_ = (bitField0_ & ~0x00000004); cTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.layoutVersion_ = layoutVersion_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.namespceID_ = namespceID_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.clusterID_ = clusterID_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.cTime_ = cTime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) return this; if (other.hasLayoutVersion()) { setLayoutVersion(other.getLayoutVersion()); } if (other.hasNamespceID()) { setNamespceID(other.getNamespceID()); } if (other.hasClusterID()) { bitField0_ |= 0x00000004; clusterID_ = other.clusterID_; onChanged(); } if (other.hasCTime()) { setCTime(other.getCTime()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLayoutVersion()) { return false; } if (!hasNamespceID()) { return false; } if (!hasClusterID()) { return false; } if (!hasCTime()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 layoutVersion = 1; private int layoutVersion_ ; /** * required uint32 layoutVersion = 1; * *
       * Layout version of the file system
        *
*/ public boolean hasLayoutVersion() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 layoutVersion = 1; * *
       * Layout version of the file system
        *
*/ public int getLayoutVersion() { return layoutVersion_; } /** * required uint32 layoutVersion = 1; * *
       * Layout version of the file system
        *
*/ public Builder setLayoutVersion(int value) { bitField0_ |= 0x00000001; layoutVersion_ = value; onChanged(); return this; } /** * required uint32 layoutVersion = 1; * *
       * Layout version of the file system
        *
*/ public Builder clearLayoutVersion() { bitField0_ = (bitField0_ & ~0x00000001); layoutVersion_ = 0; onChanged(); return this; } // required uint32 namespceID = 2; private int namespceID_ ; /** * required uint32 namespceID = 2; * *
       * File system namespace ID
        *
*/ public boolean hasNamespceID() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 namespceID = 2; * *
       * File system namespace ID
        *
*/ public int getNamespceID() { return namespceID_; } /** * required uint32 namespceID = 2; * *
       * File system namespace ID
        *
*/ public Builder setNamespceID(int value) { bitField0_ |= 0x00000002; namespceID_ = value; onChanged(); return this; } /** * required uint32 namespceID = 2; * *
       * File system namespace ID
        *
*/ public Builder clearNamespceID() { bitField0_ = (bitField0_ & ~0x00000002); namespceID_ = 0; onChanged(); return this; } // required string clusterID = 3; private java.lang.Object clusterID_ = ""; /** * required string clusterID = 3; * *
       * ID of the cluster
        *
*/ public boolean hasClusterID() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clusterID = 3; * *
       * ID of the cluster
        *
*/ public java.lang.String getClusterID() { java.lang.Object ref = clusterID_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); clusterID_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clusterID = 3; * *
       * ID of the cluster
        *
*/ public com.google.protobuf.ByteString getClusterIDBytes() { java.lang.Object ref = clusterID_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clusterID_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string clusterID = 3; * *
       * ID of the cluster
        *
*/ public Builder setClusterID( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clusterID_ = value; onChanged(); return this; } /** * required string clusterID = 3; * *
       * ID of the cluster
        *
*/ public Builder clearClusterID() { bitField0_ = (bitField0_ & ~0x00000004); clusterID_ = getDefaultInstance().getClusterID(); onChanged(); return this; } /** * required string clusterID = 3; * *
       * ID of the cluster
        *
*/ public Builder setClusterIDBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clusterID_ = value; onChanged(); return this; } // required uint64 cTime = 4; private long cTime_ ; /** * required uint64 cTime = 4; * *
       * File system creation time
        *
*/ public boolean hasCTime() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 cTime = 4; * *
       * File system creation time
        *
*/ public long getCTime() { return cTime_; } /** * required uint64 cTime = 4; * *
       * File system creation time
        *
*/ public Builder setCTime(long value) { bitField0_ |= 0x00000008; cTime_ = value; onChanged(); return this; } /** * required uint64 cTime = 4; * *
       * File system creation time
        *
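        * Clearing resets the field to its default and drops its has-bit
        * (editor's sketch):
        *
        *   builder.setCTime(12345L);
        *   builder.clearCTime();
        *   // builder.hasCTime() == false; builder.getCTime() == 0L
        *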
*/ public Builder clearCTime() { bitField0_ = (bitField0_ & ~0x00000008); cTime_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageInfoProto) } static { defaultInstance = new StorageInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageInfoProto) } public interface NamenodeRegistrationProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string rpcAddress = 1; /** * required string rpcAddress = 1; * *
     * host:port of the namenode RPC address
      *
*/ boolean hasRpcAddress(); /** * required string rpcAddress = 1; * *
     * host:port of the namenode RPC address
      *
*/ java.lang.String getRpcAddress(); /** * required string rpcAddress = 1; * *
     * host:port of the namenode RPC address
      *
*/ com.google.protobuf.ByteString getRpcAddressBytes(); // required string httpAddress = 2; /** * required string httpAddress = 2; * *
     * host:port of the namenode http server
      *
*/ boolean hasHttpAddress(); /** * required string httpAddress = 2; * *
     * host:port of the namenode http server
      *
*/ java.lang.String getHttpAddress(); /** * required string httpAddress = 2; * *
     * host:port of the namenode http server
      *
*/ com.google.protobuf.ByteString getHttpAddressBytes(); // required .hadoop.hdfs.StorageInfoProto storageInfo = 3; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
     * Node information
      *
*/ boolean hasStorageInfo(); /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
     * Node information
      *
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
     * Node information
      *
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); // optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
     * Namenode role
      *
*/ boolean hasRole(); /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
     * Namenode role
      *
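      * Both the message and its Builder implement this interface, so
      * read-only code can accept either (editor's sketch; the helper is
      * illustrative):
      *
      *   static boolean isBackup(NamenodeRegistrationProtoOrBuilder r) {
      *     return r.getRole() == NamenodeRegistrationProto.NamenodeRoleProto.BACKUP;
      *   }
      *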
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole(); } /** * Protobuf type {@code hadoop.hdfs.NamenodeRegistrationProto} * *
   **
   * Information sent by a namenode to identify itself to the primary namenode.
    *
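    * A construction sketch (editor's addition, not generated code; the
    * addresses and the prebuilt StorageInfoProto are illustrative):
    *
    *   NamenodeRegistrationProto reg = NamenodeRegistrationProto.newBuilder()
    *       .setRpcAddress("nn.example.com:8020")
    *       .setHttpAddress("nn.example.com:9870")
    *       .setStorageInfo(info)
    *       .setRole(NamenodeRegistrationProto.NamenodeRoleProto.BACKUP)
    *       .build();
    *
    * role is optional and reads as NAMENODE when unset.
    *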
*/ public static final class NamenodeRegistrationProto extends com.google.protobuf.GeneratedMessage implements NamenodeRegistrationProtoOrBuilder { // Use NamenodeRegistrationProto.newBuilder() to construct. private NamenodeRegistrationProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private NamenodeRegistrationProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final NamenodeRegistrationProto defaultInstance; public static NamenodeRegistrationProto getDefaultInstance() { return defaultInstance; } public NamenodeRegistrationProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NamenodeRegistrationProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; rpcAddress_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; httpAddress_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = storageInfo_.toBuilder(); } storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storageInfo_); storageInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 32: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(4, rawValue); } else { bitField0_ |= 0x00000008; role_ = value; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new 
com.google.protobuf.AbstractParser() { public NamenodeRegistrationProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new NamenodeRegistrationProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } /** * Protobuf enum {@code hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto} */ public enum NamenodeRoleProto implements com.google.protobuf.ProtocolMessageEnum { /** * NAMENODE = 1; */ NAMENODE(0, 1), /** * BACKUP = 2; */ BACKUP(1, 2), /** * CHECKPOINT = 3; */ CHECKPOINT(2, 3), ; /** * NAMENODE = 1; */ public static final int NAMENODE_VALUE = 1; /** * BACKUP = 2; */ public static final int BACKUP_VALUE = 2; /** * CHECKPOINT = 3; */ public static final int CHECKPOINT_VALUE = 3; public final int getNumber() { return value; } public static NamenodeRoleProto valueOf(int value) { switch (value) { case 1: return NAMENODE; case 2: return BACKUP; case 3: return CHECKPOINT; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public NamenodeRoleProto findValueByNumber(int number) { return NamenodeRoleProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDescriptor().getEnumTypes().get(0); } private static final NamenodeRoleProto[] VALUES = values(); public static NamenodeRoleProto valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private NamenodeRoleProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto) } private int bitField0_; // required string rpcAddress = 1; public static final int RPCADDRESS_FIELD_NUMBER = 1; private java.lang.Object rpcAddress_; /** * required string rpcAddress = 1; * *
     * host:port of the namenode RPC address
      *
*/ public boolean hasRpcAddress() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string rpcAddress = 1; * *
     * host:port of the namenode RPC address
      *
*/ public java.lang.String getRpcAddress() { java.lang.Object ref = rpcAddress_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { rpcAddress_ = s; } return s; } } /** * required string rpcAddress = 1; * *
     * host:port of the namenode RPC address
      *
*/ public com.google.protobuf.ByteString getRpcAddressBytes() { java.lang.Object ref = rpcAddress_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); rpcAddress_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string httpAddress = 2; public static final int HTTPADDRESS_FIELD_NUMBER = 2; private java.lang.Object httpAddress_; /** * required string httpAddress = 2; * *
     * host:port of the namenode http server
      *
*/ public boolean hasHttpAddress() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string httpAddress = 2; * *
     * host:port of the namenode http server
      *
*/ public java.lang.String getHttpAddress() { java.lang.Object ref = httpAddress_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { httpAddress_ = s; } return s; } } /** * required string httpAddress = 2; * *
     * host:port of the namenode http server
      *
*/ public com.google.protobuf.ByteString getHttpAddressBytes() { java.lang.Object ref = httpAddress_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); httpAddress_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.StorageInfoProto storageInfo = 3; public static final int STORAGEINFO_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
     * Node information
      *
*/ public boolean hasStorageInfo() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
     * Node information
      *
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { return storageInfo_; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
     * Node information
      *
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { return storageInfo_; } // optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; public static final int ROLE_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_; /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
     * Namenode role
      *
*/ public boolean hasRole() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
     * Namenode role
      *
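      * Enum constants map to their wire numbers through getNumber() and
      * valueOf(int) (editor's sketch):
      *
      *   NamenodeRegistrationProto.NamenodeRoleProto.BACKUP.getNumber();  // 2
      *   NamenodeRegistrationProto.NamenodeRoleProto.valueOf(2);          // BACKUP
      *   NamenodeRegistrationProto.NamenodeRoleProto.valueOf(99);         // null
      *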
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() { return role_; } private void initFields() { rpcAddress_ = ""; httpAddress_ = ""; storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasRpcAddress()) { memoizedIsInitialized = 0; return false; } if (!hasHttpAddress()) { memoizedIsInitialized = 0; return false; } if (!hasStorageInfo()) { memoizedIsInitialized = 0; return false; } if (!getStorageInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getRpcAddressBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getHttpAddressBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, storageInfo_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeEnum(4, role_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getRpcAddressBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getHttpAddressBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, storageInfo_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(4, role_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) obj; boolean result = true; result = result && (hasRpcAddress() == other.hasRpcAddress()); if (hasRpcAddress()) { result = result && getRpcAddress() .equals(other.getRpcAddress()); } result = result && (hasHttpAddress() == other.hasHttpAddress()); if (hasHttpAddress()) { result = result && getHttpAddress() .equals(other.getHttpAddress()); } result = result && (hasStorageInfo() == other.hasStorageInfo()); if (hasStorageInfo()) { result = result && getStorageInfo() .equals(other.getStorageInfo()); } result = result && (hasRole() == other.hasRole()); if (hasRole()) { result = result && (getRole() == other.getRole()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int 
memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRpcAddress()) { hash = (37 * hash) + RPCADDRESS_FIELD_NUMBER; hash = (53 * hash) + getRpcAddress().hashCode(); } if (hasHttpAddress()) { hash = (37 * hash) + HTTPADDRESS_FIELD_NUMBER; hash = (53 * hash) + getHttpAddress().hashCode(); } if (hasStorageInfo()) { hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; hash = (53 * hash) + getStorageInfo().hashCode(); } if (hasRole()) { hash = (37 * hash) + ROLE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getRole()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { 
return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.NamenodeRegistrationProto} * *
     **
     * Information sent by a namenode to identify itself to the primary namenode.
      *
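      * An edit-in-place sketch (editor's addition): toBuilder() copies an
      * existing message into a fresh Builder, so one field can be changed
      * without restating the rest:
      *
      *   NamenodeRegistrationProto updated = reg.toBuilder()
      *       .setRole(NamenodeRegistrationProto.NamenodeRoleProto.CHECKPOINT)
      *       .build();
      *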
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getStorageInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); rpcAddress_ = ""; bitField0_ = (bitField0_ & ~0x00000001); httpAddress_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (storageInfoBuilder_ == null) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); } else { storageInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.rpcAddress_ = rpcAddress_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.httpAddress_ = httpAddress_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (storageInfoBuilder_ == null) { result.storageInfo_ = storageInfo_; } else { result.storageInfo_ = storageInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.role_ = role_; 
result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) return this; if (other.hasRpcAddress()) { bitField0_ |= 0x00000001; rpcAddress_ = other.rpcAddress_; onChanged(); } if (other.hasHttpAddress()) { bitField0_ |= 0x00000002; httpAddress_ = other.httpAddress_; onChanged(); } if (other.hasStorageInfo()) { mergeStorageInfo(other.getStorageInfo()); } if (other.hasRole()) { setRole(other.getRole()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasRpcAddress()) { return false; } if (!hasHttpAddress()) { return false; } if (!hasStorageInfo()) { return false; } if (!getStorageInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string rpcAddress = 1; private java.lang.Object rpcAddress_ = ""; /** * required string rpcAddress = 1; * *
       * host:port of the namenode RPC address
        *
*/ public boolean hasRpcAddress() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string rpcAddress = 1; * *
       * host:port of the namenode RPC address
        *
*/ public java.lang.String getRpcAddress() { java.lang.Object ref = rpcAddress_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); rpcAddress_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string rpcAddress = 1; * *
       * host:port of the namenode RPC address
        *
*/ public com.google.protobuf.ByteString getRpcAddressBytes() { java.lang.Object ref = rpcAddress_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); rpcAddress_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string rpcAddress = 1; * *
       * host:port of the namenode RPC address
        *
*/ public Builder setRpcAddress( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; rpcAddress_ = value; onChanged(); return this; } /** * required string rpcAddress = 1; * *
       * host:port of the namenode RPC address
        *
*/ public Builder clearRpcAddress() { bitField0_ = (bitField0_ & ~0x00000001); rpcAddress_ = getDefaultInstance().getRpcAddress(); onChanged(); return this; } /** * required string rpcAddress = 1; * *
       * host:port of the namenode RPC address
        *
*/ public Builder setRpcAddressBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; rpcAddress_ = value; onChanged(); return this; } // required string httpAddress = 2; private java.lang.Object httpAddress_ = ""; /** * required string httpAddress = 2; * *
       * host:port of the namenode http server
        *
*/ public boolean hasHttpAddress() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string httpAddress = 2; * *
       * host:port of the namenode http server
        *
*/ public java.lang.String getHttpAddress() { java.lang.Object ref = httpAddress_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); httpAddress_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string httpAddress = 2; * *
       * host:port of the namenode http server
        *
*/ public com.google.protobuf.ByteString getHttpAddressBytes() { java.lang.Object ref = httpAddress_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); httpAddress_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string httpAddress = 2; * *
       * host:port of the namenode http server
        *
*/ public Builder setHttpAddress( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; httpAddress_ = value; onChanged(); return this; } /** * required string httpAddress = 2; * *
       * host:port of the namenode http server
        *
*/ public Builder clearHttpAddress() { bitField0_ = (bitField0_ & ~0x00000002); httpAddress_ = getDefaultInstance().getHttpAddress(); onChanged(); return this; } /** * required string httpAddress = 2; * *
       * host:port of the namenode http server
        *
*/ public Builder setHttpAddressBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; httpAddress_ = value; onChanged(); return this; } // required .hadoop.hdfs.StorageInfoProto storageInfo = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public boolean hasStorageInfo() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { if (storageInfoBuilder_ == null) { return storageInfo_; } else { return storageInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { if (storageInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storageInfo_ = value; onChanged(); } else { storageInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public Builder setStorageInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { if (storageInfoBuilder_ == null) { storageInfo_ = builderForValue.build(); onChanged(); } else { storageInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
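        * Unlike setStorageInfo(), merging combines the argument with any
        * value already present, field by field (editor's sketch; now is
        * illustrative):
        *
        *   builder.mergeStorageInfo(
        *       StorageInfoProto.newBuilder().setCTime(now).buildPartial());
        *   // only cTime is overwritten; previously set fields survive
        *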
*/ public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { if (storageInfoBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); } else { storageInfo_ = value; } onChanged(); } else { storageInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public Builder clearStorageInfo() { if (storageInfoBuilder_ == null) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { storageInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { bitField0_ |= 0x00000004; onChanged(); return getStorageInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { if (storageInfoBuilder_ != null) { return storageInfoBuilder_.getMessageOrBuilder(); } else { return storageInfo_; } } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 3; * *
       * Node information
        *
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> getStorageInfoFieldBuilder() { if (storageInfoBuilder_ == null) { storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( storageInfo_, getParentForChildren(), isClean()); storageInfo_ = null; } return storageInfoBuilder_; } // optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
       * Namenode role
        *
*/ public boolean hasRole() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
       * Namenode role
        *
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() { return role_; } /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
       * Namenode role
        *
*/ public Builder setRole(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; role_ = value; onChanged(); return this; } /** * optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE]; * *
       * Namenode role
        *
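        * After clearing, the field reports unset but still reads as its
        * declared default (editor's sketch):
        *
        *   builder.clearRole();
        *   // builder.hasRole() == false
        *   // builder.getRole() == NamenodeRoleProto.NAMENODE, the default
        *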
*/ public Builder clearRole() { bitField0_ = (bitField0_ & ~0x00000008); role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamenodeRegistrationProto) } static { defaultInstance = new NamenodeRegistrationProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.NamenodeRegistrationProto) } public interface CheckpointSignatureProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string blockPoolId = 1; /** * required string blockPoolId = 1; */ boolean hasBlockPoolId(); /** * required string blockPoolId = 1; */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 1; */ com.google.protobuf.ByteString getBlockPoolIdBytes(); // required uint64 mostRecentCheckpointTxId = 2; /** * required uint64 mostRecentCheckpointTxId = 2; */ boolean hasMostRecentCheckpointTxId(); /** * required uint64 mostRecentCheckpointTxId = 2; */ long getMostRecentCheckpointTxId(); // required uint64 curSegmentTxId = 3; /** * required uint64 curSegmentTxId = 3; */ boolean hasCurSegmentTxId(); /** * required uint64 curSegmentTxId = 3; */ long getCurSegmentTxId(); // required .hadoop.hdfs.StorageInfoProto storageInfo = 4; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ boolean hasStorageInfo(); /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.CheckpointSignatureProto} * *
   **
   * Unique signature to identify checkpoint transactions.
    *
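    * A parse-side sketch (editor's addition, not generated code; rawBytes
    * is illustrative):
    *
    *   CheckpointSignatureProto sig = CheckpointSignatureProto.parseFrom(rawBytes);
    *   String bpId   = sig.getBlockPoolId();
    *   long ckptTxId = sig.getMostRecentCheckpointTxId();
    *   long segTxId  = sig.getCurSegmentTxId();
    *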
*/ public static final class CheckpointSignatureProto extends com.google.protobuf.GeneratedMessage implements CheckpointSignatureProtoOrBuilder { // Use CheckpointSignatureProto.newBuilder() to construct. private CheckpointSignatureProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CheckpointSignatureProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CheckpointSignatureProto defaultInstance; public static CheckpointSignatureProto getDefaultInstance() { return defaultInstance; } public CheckpointSignatureProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CheckpointSignatureProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; blockPoolId_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; mostRecentCheckpointTxId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; curSegmentTxId_ = input.readUInt64(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = storageInfo_.toBuilder(); } storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storageInfo_); storageInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public CheckpointSignatureProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new CheckpointSignatureProto(input, extensionRegistry); } }; 
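    // Editor's note (not generated code): the static parseFrom()/
    // parseDelimitedFrom() overloads below all delegate to this PARSER
    // instance, so the two call styles are equivalent:
    //
    //   CheckpointSignatureProto a = CheckpointSignatureProto.PARSER.parseFrom(data);
    //   CheckpointSignatureProto b = CheckpointSignatureProto.parseFrom(data);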
@java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string blockPoolId = 1; public static final int BLOCKPOOLID_FIELD_NUMBER = 1; private java.lang.Object blockPoolId_; /** * required string blockPoolId = 1; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string blockPoolId = 1; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 1; */ public com.google.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint64 mostRecentCheckpointTxId = 2; public static final int MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER = 2; private long mostRecentCheckpointTxId_; /** * required uint64 mostRecentCheckpointTxId = 2; */ public boolean hasMostRecentCheckpointTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 mostRecentCheckpointTxId = 2; */ public long getMostRecentCheckpointTxId() { return mostRecentCheckpointTxId_; } // required uint64 curSegmentTxId = 3; public static final int CURSEGMENTTXID_FIELD_NUMBER = 3; private long curSegmentTxId_; /** * required uint64 curSegmentTxId = 3; */ public boolean hasCurSegmentTxId() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 curSegmentTxId = 3; */ public long getCurSegmentTxId() { return curSegmentTxId_; } // required .hadoop.hdfs.StorageInfoProto storageInfo = 4; public static final int STORAGEINFO_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public boolean hasStorageInfo() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { return storageInfo_; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { return storageInfo_; } private void initFields() { blockPoolId_ = ""; mostRecentCheckpointTxId_ = 0L; curSegmentTxId_ = 0L; storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } if (!hasMostRecentCheckpointTxId()) { memoizedIsInitialized = 0; return false; } if (!hasCurSegmentTxId()) { memoizedIsInitialized = 0; return false; } if (!hasStorageInfo()) { memoizedIsInitialized = 0; return false; } if (!getStorageInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { 
getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getBlockPoolIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, mostRecentCheckpointTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, curSegmentTxId_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, storageInfo_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getBlockPoolIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, mostRecentCheckpointTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, curSegmentTxId_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, storageInfo_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) obj; boolean result = true; result = result && (hasBlockPoolId() == other.hasBlockPoolId()); if (hasBlockPoolId()) { result = result && getBlockPoolId() .equals(other.getBlockPoolId()); } result = result && (hasMostRecentCheckpointTxId() == other.hasMostRecentCheckpointTxId()); if (hasMostRecentCheckpointTxId()) { result = result && (getMostRecentCheckpointTxId() == other.getMostRecentCheckpointTxId()); } result = result && (hasCurSegmentTxId() == other.hasCurSegmentTxId()); if (hasCurSegmentTxId()) { result = result && (getCurSegmentTxId() == other.getCurSegmentTxId()); } result = result && (hasStorageInfo() == other.hasStorageInfo()); if (hasStorageInfo()) { result = result && getStorageInfo() .equals(other.getStorageInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasMostRecentCheckpointTxId()) { hash = (37 * hash) + MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMostRecentCheckpointTxId()); } if (hasCurSegmentTxId()) { hash = (37 * hash) + CURSEGMENTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCurSegmentTxId()); } if (hasStorageInfo()) { hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; hash = (53 * hash) + getStorageInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CheckpointSignatureProto} * *
     **
     * Unique signature to identify checkpoint transactions.
     * 
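     * A minimal builder sketch (illustrative only; the literal values and
     * the storageInfo variable are hypothetical, and StorageInfoProto's own
     * required fields are assumed to be set elsewhere):
     *
     *   CheckpointSignatureProto sig = CheckpointSignatureProto.newBuilder()
     *       .setBlockPoolId("BP-1234567890-10.0.0.1-1400000000000")
     *       .setMostRecentCheckpointTxId(100L)
     *       .setCurSegmentTxId(101L)
     *       .setStorageInfo(storageInfo)
     *       .build();
     *
     * All four fields are required, so build() throws an
     * UninitializedMessageException if any of them is unset.
     * 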
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getStorageInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); blockPoolId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); mostRecentCheckpointTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); curSegmentTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); if (storageInfoBuilder_ == null) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); } else { storageInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.blockPoolId_ = blockPoolId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.mostRecentCheckpointTxId_ = mostRecentCheckpointTxId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.curSegmentTxId_ = curSegmentTxId_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (storageInfoBuilder_ == null) { result.storageInfo_ = storageInfo_; } else { result.storageInfo_ = storageInfoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); 
return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) return this; if (other.hasBlockPoolId()) { bitField0_ |= 0x00000001; blockPoolId_ = other.blockPoolId_; onChanged(); } if (other.hasMostRecentCheckpointTxId()) { setMostRecentCheckpointTxId(other.getMostRecentCheckpointTxId()); } if (other.hasCurSegmentTxId()) { setCurSegmentTxId(other.getCurSegmentTxId()); } if (other.hasStorageInfo()) { mergeStorageInfo(other.getStorageInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlockPoolId()) { return false; } if (!hasMostRecentCheckpointTxId()) { return false; } if (!hasCurSegmentTxId()) { return false; } if (!hasStorageInfo()) { return false; } if (!getStorageInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string blockPoolId = 1; private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 1; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string blockPoolId = 1; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); blockPoolId_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 1; */ public com.google.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string blockPoolId = 1; */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockPoolId_ = value; onChanged(); return this; } /** * required string blockPoolId = 1; */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000001); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /** * required string blockPoolId = 1; */ public Builder setBlockPoolIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockPoolId_ = value; onChanged(); return this; } // required uint64 mostRecentCheckpointTxId = 2; private long 
mostRecentCheckpointTxId_ ; /** * required uint64 mostRecentCheckpointTxId = 2; */ public boolean hasMostRecentCheckpointTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 mostRecentCheckpointTxId = 2; */ public long getMostRecentCheckpointTxId() { return mostRecentCheckpointTxId_; } /** * required uint64 mostRecentCheckpointTxId = 2; */ public Builder setMostRecentCheckpointTxId(long value) { bitField0_ |= 0x00000002; mostRecentCheckpointTxId_ = value; onChanged(); return this; } /** * required uint64 mostRecentCheckpointTxId = 2; */ public Builder clearMostRecentCheckpointTxId() { bitField0_ = (bitField0_ & ~0x00000002); mostRecentCheckpointTxId_ = 0L; onChanged(); return this; } // required uint64 curSegmentTxId = 3; private long curSegmentTxId_ ; /** * required uint64 curSegmentTxId = 3; */ public boolean hasCurSegmentTxId() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 curSegmentTxId = 3; */ public long getCurSegmentTxId() { return curSegmentTxId_; } /** * required uint64 curSegmentTxId = 3; */ public Builder setCurSegmentTxId(long value) { bitField0_ |= 0x00000004; curSegmentTxId_ = value; onChanged(); return this; } /** * required uint64 curSegmentTxId = 3; */ public Builder clearCurSegmentTxId() { bitField0_ = (bitField0_ & ~0x00000004); curSegmentTxId_ = 0L; onChanged(); return this; } // required .hadoop.hdfs.StorageInfoProto storageInfo = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public boolean hasStorageInfo() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { if (storageInfoBuilder_ == null) { return storageInfo_; } else { return storageInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { if (storageInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storageInfo_ = value; onChanged(); } else { storageInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public Builder setStorageInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { if (storageInfoBuilder_ == null) { storageInfo_ = builderForValue.build(); onChanged(); } else { storageInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { if (storageInfoBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { storageInfo_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); } else { storageInfo_ = value; } onChanged(); } else { storageInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public Builder clearStorageInfo() { if (storageInfoBuilder_ == null) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { storageInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { bitField0_ |= 0x00000008; onChanged(); return getStorageInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { if (storageInfoBuilder_ != null) { return storageInfoBuilder_.getMessageOrBuilder(); } else { return storageInfo_; } } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> getStorageInfoFieldBuilder() { if (storageInfoBuilder_ == null) { storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( storageInfo_, getParentForChildren(), isClean()); storageInfo_ = null; } return storageInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckpointSignatureProto) } static { defaultInstance = new CheckpointSignatureProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckpointSignatureProto) } public interface NamenodeCommandProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 action = 1; /** * required uint32 action = 1; */ boolean hasAction(); /** * required uint32 action = 1; */ int getAction(); // required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ boolean hasType(); /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType(); // optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ boolean hasCheckpointCmd(); /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getCheckpointCmd(); /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.NamenodeCommandProto} * *
   **
   * Command sent from one namenode to another namenode.
   * 
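   * A receiving namenode would typically dispatch on the command type;
   * a sketch (cmd is a hypothetical instance):
   *
   *   if (cmd.getType() == NamenodeCommandProto.Type.CheckPointCommand
   *       && cmd.hasCheckpointCmd()) {
   *     CheckpointCommandProto cpCmd = cmd.getCheckpointCmd();
   *     // act on the checkpoint command
   *   }
   * 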
*/ public static final class NamenodeCommandProto extends com.google.protobuf.GeneratedMessage implements NamenodeCommandProtoOrBuilder { // Use NamenodeCommandProto.newBuilder() to construct. private NamenodeCommandProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private NamenodeCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final NamenodeCommandProto defaultInstance; public static NamenodeCommandProto getDefaultInstance() { return defaultInstance; } public NamenodeCommandProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NamenodeCommandProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; action_ = input.readUInt32(); break; } case 16: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; type_ = value; } break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = checkpointCmd_.toBuilder(); } checkpointCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(checkpointCmd_); checkpointCmd_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public NamenodeCommandProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new NamenodeCommandProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } /** * Protobuf enum {@code hadoop.hdfs.NamenodeCommandProto.Type} */ public enum Type implements com.google.protobuf.ProtocolMessageEnum { /** * NamenodeCommand = 0; * *
       * Base command
       * 
*/ NamenodeCommand(0, 0), /** * CheckPointCommand = 1; * *
       * Checkpoint command
       * 
*/ CheckPointCommand(1, 1), ; /** * NamenodeCommand = 0; * *
       * Base command
       * 
*/ public static final int NamenodeCommand_VALUE = 0; /** * CheckPointCommand = 1; * *
       * Checkpoint command
       * 
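        * Wire values map back to constants through the generated
        * valueOf(int) below; unknown numbers yield null (illustrative):
        *
        *   NamenodeCommandProto.Type t = NamenodeCommandProto.Type.valueOf(1);
        *   // t == Type.CheckPointCommand
        * 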
*/ public static final int CheckPointCommand_VALUE = 1; public final int getNumber() { return value; } public static Type valueOf(int value) { switch (value) { case 0: return NamenodeCommand; case 1: return CheckPointCommand; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public Type findValueByNumber(int number) { return Type.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDescriptor().getEnumTypes().get(0); } private static final Type[] VALUES = values(); public static Type valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private Type(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.NamenodeCommandProto.Type) } private int bitField0_; // required uint32 action = 1; public static final int ACTION_FIELD_NUMBER = 1; private int action_; /** * required uint32 action = 1; */ public boolean hasAction() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 action = 1; */ public int getAction() { return action_; } // required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; public static final int TYPE_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type type_; /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ public boolean hasType() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType() { return type_; } // optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; public static final int CHECKPOINTCMD_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto checkpointCmd_; /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public boolean hasCheckpointCmd() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getCheckpointCmd() { return checkpointCmd_; } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder() { return checkpointCmd_; } private void initFields() { action_ = 0; type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if 
(isInitialized != -1) return isInitialized == 1; if (!hasAction()) { memoizedIsInitialized = 0; return false; } if (!hasType()) { memoizedIsInitialized = 0; return false; } if (hasCheckpointCmd()) { if (!getCheckpointCmd().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, action_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeEnum(2, type_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, checkpointCmd_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, action_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(2, type_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, checkpointCmd_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto) obj; boolean result = true; result = result && (hasAction() == other.hasAction()); if (hasAction()) { result = result && (getAction() == other.getAction()); } result = result && (hasType() == other.hasType()); if (hasType()) { result = result && (getType() == other.getType()); } result = result && (hasCheckpointCmd() == other.hasCheckpointCmd()); if (hasCheckpointCmd()) { result = result && getCheckpointCmd() .equals(other.getCheckpointCmd()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasAction()) { hash = (37 * hash) + ACTION_FIELD_NUMBER; hash = (53 * hash) + getAction(); } if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getType()); } if (hasCheckpointCmd()) { hash = (37 * hash) + CHECKPOINTCMD_FIELD_NUMBER; hash = (53 * hash) + getCheckpointCmd().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.NamenodeCommandProto} * *
     **
     * Command sent from one namenode to another namenode.
     * 
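     * A minimal builder sketch (illustrative; the action value is
     * hypothetical):
     *
     *   NamenodeCommandProto cmd = NamenodeCommandProto.newBuilder()
     *       .setAction(1)
     *       .setType(NamenodeCommandProto.Type.NamenodeCommand)
     *       .build();
     *
     * action and type are required; checkpointCmd is optional and may be
     * left unset.
     * 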
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getCheckpointCmdFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); action_ = 0; bitField0_ = (bitField0_ & ~0x00000001); type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; bitField0_ = (bitField0_ & ~0x00000002); if (checkpointCmdBuilder_ == null) { checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); } else { checkpointCmdBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.action_ = action_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.type_ = type_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (checkpointCmdBuilder_ == null) { result.checkpointCmd_ = checkpointCmd_; } else { result.checkpointCmd_ = checkpointCmdBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto) { return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance()) return this; if (other.hasAction()) { setAction(other.getAction()); } if (other.hasType()) { setType(other.getType()); } if (other.hasCheckpointCmd()) { mergeCheckpointCmd(other.getCheckpointCmd()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasAction()) { return false; } if (!hasType()) { return false; } if (hasCheckpointCmd()) { if (!getCheckpointCmd().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 action = 1; private int action_ ; /** * required uint32 action = 1; */ public boolean hasAction() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 action = 1; */ public int getAction() { return action_; } /** * required uint32 action = 1; */ public Builder setAction(int value) { bitField0_ |= 0x00000001; action_ = value; onChanged(); return this; } /** * required uint32 action = 1; */ public Builder clearAction() { bitField0_ = (bitField0_ & ~0x00000001); action_ = 0; onChanged(); return this; } // required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ public boolean hasType() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType() { return type_; } /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; type_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.NamenodeCommandProto.Type type = 2; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000002); type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; onChanged(); return this; } // optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder> checkpointCmdBuilder_; /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public boolean hasCheckpointCmd() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getCheckpointCmd() { if (checkpointCmdBuilder_ == null) { return checkpointCmd_; } else { return checkpointCmdBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public Builder setCheckpointCmd(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto value) { if (checkpointCmdBuilder_ == null) { if (value == null) { throw new NullPointerException(); } checkpointCmd_ = value; onChanged(); } else { checkpointCmdBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public Builder setCheckpointCmd( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder builderForValue) { if (checkpointCmdBuilder_ == null) { checkpointCmd_ = builderForValue.build(); onChanged(); } else { checkpointCmdBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public Builder mergeCheckpointCmd(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto value) { if (checkpointCmdBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && checkpointCmd_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance()) { checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.newBuilder(checkpointCmd_).mergeFrom(value).buildPartial(); } else { checkpointCmd_ = value; } onChanged(); } else { checkpointCmdBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public Builder clearCheckpointCmd() { if (checkpointCmdBuilder_ == null) { checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); onChanged(); } else { checkpointCmdBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder getCheckpointCmdBuilder() { bitField0_ |= 0x00000004; onChanged(); return getCheckpointCmdFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder() { if (checkpointCmdBuilder_ != null) { return checkpointCmdBuilder_.getMessageOrBuilder(); } else { return checkpointCmd_; } } /** * optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder> getCheckpointCmdFieldBuilder() { if (checkpointCmdBuilder_ == null) { checkpointCmdBuilder_ = new 
com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder>( checkpointCmd_, getParentForChildren(), isClean()); checkpointCmd_ = null; } return checkpointCmdBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamenodeCommandProto) } static { defaultInstance = new NamenodeCommandProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.NamenodeCommandProto) } public interface CheckpointCommandProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CheckpointSignatureProto signature = 1; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
     * Unique signature to identify checkpoint transaction
     * 
*/ boolean hasSignature(); /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
     * Unique signature to identify checkpoint transaction
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature(); /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
     * Unique signature to identify checkpoint transaction
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder(); // required bool needToReturnImage = 2; /** * required bool needToReturnImage = 2; * *
     * If true, transfer the image back to the primary upon completion of the checkpoint
     * 
*/ boolean hasNeedToReturnImage(); /** * required bool needToReturnImage = 2; * *
     * If true, transfer the image back to the primary upon completion of the checkpoint
     * 
*/ boolean getNeedToReturnImage(); } /** * Protobuf type {@code hadoop.hdfs.CheckpointCommandProto} * *
   **
   * Command returned from primary to checkpointing namenode.
   * This command carries the checkpoint signature that identifies
   * the checkpoint transaction and is needed for further
   * communication related to checkpointing.
   * 
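   * On the checkpointing namenode this is typically consumed as follows
   * (sketch; cpCmd is a hypothetical instance):
   *
   *   CheckpointSignatureProto sig = cpCmd.getSignature();
   *   if (cpCmd.getNeedToReturnImage()) {
   *     // transfer the image back to the primary
   *   }
   * 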
*/ public static final class CheckpointCommandProto extends com.google.protobuf.GeneratedMessage implements CheckpointCommandProtoOrBuilder { // Use CheckpointCommandProto.newBuilder() to construct. private CheckpointCommandProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CheckpointCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CheckpointCommandProto defaultInstance; public static CheckpointCommandProto getDefaultInstance() { return defaultInstance; } public CheckpointCommandProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CheckpointCommandProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = signature_.toBuilder(); } signature_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(signature_); signature_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; needToReturnImage_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public CheckpointCommandProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new CheckpointCommandProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CheckpointSignatureProto signature = 1; public static 
final int SIGNATURE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
     * Unique signature to identify checkpoint transaction
     * 
*/ public boolean hasSignature() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
     * Unique signature to identify checkpoint transaction
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { return signature_; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
     * Unique signature to identify checkpoint transaction
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { return signature_; } // required bool needToReturnImage = 2; public static final int NEEDTORETURNIMAGE_FIELD_NUMBER = 2; private boolean needToReturnImage_; /** * required bool needToReturnImage = 2; * *
     * If true, transfer the image back to the primary upon completion of the checkpoint
     * 
*/ public boolean hasNeedToReturnImage() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool needToReturnImage = 2; * *
     * If true, transfer the image back to the primary upon completion of the checkpoint
     * 
*/ public boolean getNeedToReturnImage() { return needToReturnImage_; } private void initFields() { signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); needToReturnImage_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSignature()) { memoizedIsInitialized = 0; return false; } if (!hasNeedToReturnImage()) { memoizedIsInitialized = 0; return false; } if (!getSignature().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, signature_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, needToReturnImage_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, signature_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, needToReturnImage_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto) obj; boolean result = true; result = result && (hasSignature() == other.hasSignature()); if (hasSignature()) { result = result && getSignature() .equals(other.getSignature()); } result = result && (hasNeedToReturnImage() == other.hasNeedToReturnImage()); if (hasNeedToReturnImage()) { result = result && (getNeedToReturnImage() == other.getNeedToReturnImage()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSignature()) { hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; hash = (53 * hash) + getSignature().hashCode(); } if (hasNeedToReturnImage()) { hash = (37 * hash) + NEEDTORETURNIMAGE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getNeedToReturnImage()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CheckpointCommandProto} * *
     **
     * Command returned from the primary to the checkpointing namenode.
     * It carries the checkpoint signature that identifies the checkpoint
     * transaction and is required for all further communication related
     * to checkpointing.
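     *
     * A minimal usage sketch (illustrative only, not part of the generated
     * file; 'sig' stands for a CheckpointSignatureProto obtained elsewhere):
     *
     *   CheckpointCommandProto cmd = CheckpointCommandProto.newBuilder()
     *       .setSignature(sig)           // required field 1
     *       .setNeedToReturnImage(true)  // required field 2
     *       .build();                    // throws if a required field is unset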
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSignatureFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (signatureBuilder_ == null) { signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); } else { signatureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); needToReturnImage_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (signatureBuilder_ == null) { result.signature_ = signature_; } else { result.signature_ = signatureBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.needToReturnImage_ = needToReturnImage_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto other) { if 
(other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance()) return this; if (other.hasSignature()) { mergeSignature(other.getSignature()); } if (other.hasNeedToReturnImage()) { setNeedToReturnImage(other.getNeedToReturnImage()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSignature()) { return false; } if (!hasNeedToReturnImage()) { return false; } if (!getSignature().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CheckpointSignatureProto signature = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
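       *
       * Read-side sketch (illustrative only; 'cmd' is a parsed
       * CheckpointCommandProto):
       *
       *   if (cmd.hasSignature()) {
       *     CheckpointSignatureProto s = cmd.getSignature();
       *   }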
       * 
*/ public boolean hasSignature() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { if (signatureBuilder_ == null) { return signature_; } else { return signatureBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { if (signatureBuilder_ == null) { if (value == null) { throw new NullPointerException(); } signature_ = value; onChanged(); } else { signatureBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public Builder setSignature( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder builderForValue) { if (signatureBuilder_ == null) { signature_ = builderForValue.build(); onChanged(); } else { signatureBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { if (signatureBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) { signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); } else { signature_ = value; } onChanged(); } else { signatureBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public Builder clearSignature() { if (signatureBuilder_ == null) { signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); onChanged(); } else { signatureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSignatureFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { if (signatureBuilder_ != null) { return signatureBuilder_.getMessageOrBuilder(); } else { return signature_; } } /** * required .hadoop.hdfs.CheckpointSignatureProto signature = 1; * *
       * Unique signature that identifies the checkpoint transaction
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> getSignatureFieldBuilder() { if (signatureBuilder_ == null) { signatureBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder>( signature_, getParentForChildren(), isClean()); signature_ = null; } return signatureBuilder_; } // required bool needToReturnImage = 2; private boolean needToReturnImage_ ; /** * required bool needToReturnImage = 2; * *
       * If true, transfer the image back to the primary upon completion of the checkpoint
       * 
*/ public boolean hasNeedToReturnImage() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool needToReturnImage = 2; * *
       * If true, transfer the image back to the primary upon completion of the checkpoint
       * 
*/ public boolean getNeedToReturnImage() { return needToReturnImage_; } /** * required bool needToReturnImage = 2; * *
       * If true, transfer the image back to the primary upon completion of the checkpoint
       * 
*/ public Builder setNeedToReturnImage(boolean value) { bitField0_ |= 0x00000002; needToReturnImage_ = value; onChanged(); return this; } /** * required bool needToReturnImage = 2; * *
       * If true, transfer the image back to the primary upon completion of the checkpoint
       * 
*/ public Builder clearNeedToReturnImage() { bitField0_ = (bitField0_ & ~0x00000002); needToReturnImage_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckpointCommandProto) } static { defaultInstance = new CheckpointCommandProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckpointCommandProto) } public interface BlockProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 blockId = 1; /** * required uint64 blockId = 1; */ boolean hasBlockId(); /** * required uint64 blockId = 1; */ long getBlockId(); // required uint64 genStamp = 2; /** * required uint64 genStamp = 2; */ boolean hasGenStamp(); /** * required uint64 genStamp = 2; */ long getGenStamp(); // optional uint64 numBytes = 3 [default = 0]; /** * optional uint64 numBytes = 3 [default = 0]; */ boolean hasNumBytes(); /** * optional uint64 numBytes = 3 [default = 0]; */ long getNumBytes(); } /** * Protobuf type {@code hadoop.hdfs.BlockProto} * *
   **
   * Block information
   *
   * Please be wary of adding additional fields here, since INodeFiles
   * need to fit in PB's default max message size of 64MB.
   * We restrict the max # of blocks per file
   * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
   * to avoid changing this.
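    *
    * A minimal construction sketch (illustrative only; the literal values
    * are hypothetical):
    *
    *   BlockProto block = BlockProto.newBuilder()
    *       .setBlockId(1073741825L)   // required uint64 blockId = 1
    *       .setGenStamp(1001L)        // required uint64 genStamp = 2
    *       .setNumBytes(134217728L)   // optional uint64 numBytes = 3
    *       .build();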
   * 
*/ public static final class BlockProto extends com.google.protobuf.GeneratedMessage implements BlockProtoOrBuilder { // Use BlockProto.newBuilder() to construct. private BlockProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private BlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final BlockProto defaultInstance; public static BlockProto getDefaultInstance() { return defaultInstance; } public BlockProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; blockId_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; genStamp_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; numBytes_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public BlockProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new BlockProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 blockId = 1; public static final int BLOCKID_FIELD_NUMBER = 1; private long blockId_; /** * required uint64 blockId = 1; */ public boolean hasBlockId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 blockId = 1; */ public long getBlockId() { return blockId_; } // required uint64 genStamp = 2; public static final int GENSTAMP_FIELD_NUMBER = 2; private long genStamp_; /** * required uint64 genStamp = 2; */ public boolean hasGenStamp() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 genStamp = 2; */ 
public long getGenStamp() { return genStamp_; } // optional uint64 numBytes = 3 [default = 0]; public static final int NUMBYTES_FIELD_NUMBER = 3; private long numBytes_; /** * optional uint64 numBytes = 3 [default = 0]; */ public boolean hasNumBytes() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 numBytes = 3 [default = 0]; */ public long getNumBytes() { return numBytes_; } private void initFields() { blockId_ = 0L; genStamp_ = 0L; numBytes_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlockId()) { memoizedIsInitialized = 0; return false; } if (!hasGenStamp()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, blockId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, genStamp_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, numBytes_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, blockId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, genStamp_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, numBytes_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) obj; boolean result = true; result = result && (hasBlockId() == other.hasBlockId()); if (hasBlockId()) { result = result && (getBlockId() == other.getBlockId()); } result = result && (hasGenStamp() == other.hasGenStamp()); if (hasGenStamp()) { result = result && (getGenStamp() == other.getGenStamp()); } result = result && (hasNumBytes() == other.hasNumBytes()); if (hasNumBytes()) { result = result && (getNumBytes() == other.getNumBytes()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlockId()) { hash = (37 * hash) + BLOCKID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlockId()); } if (hasGenStamp()) { hash = (37 * hash) + GENSTAMP_FIELD_NUMBER; hash = (53 * hash) + hashLong(getGenStamp()); } if (hasNumBytes()) { hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNumBytes()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = 
hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.BlockProto} * *
     **
     * Block information
     *
     * Please be wary of adding additional fields here, since INodeFiles
     * need to fit in PB's default max message size of 64MB.
     * We restrict the max # of blocks per file
     * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
     * to avoid changing this.
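      *
      * Round-trip sketch using the parse methods above (illustrative only;
      * assumes 'block' is a fully built BlockProto):
      *
      *   byte[] bytes = block.toByteArray();     // standard protobuf serialization
      *   BlockProto copy = BlockProto.parseFrom(bytes);
      *   assert copy.equals(block);              // value-based equals(), as defined above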
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); blockId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); genStamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); numBytes_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.blockId_ = blockId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.genStamp_ = genStamp_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.numBytes_ = numBytes_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) return this; if (other.hasBlockId()) { setBlockId(other.getBlockId()); } if (other.hasGenStamp()) { setGenStamp(other.getGenStamp()); } if (other.hasNumBytes()) { setNumBytes(other.getNumBytes()); } this.mergeUnknownFields(other.getUnknownFields()); return 
this; } public final boolean isInitialized() { if (!hasBlockId()) { return false; } if (!hasGenStamp()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 blockId = 1; private long blockId_ ; /** * required uint64 blockId = 1; */ public boolean hasBlockId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 blockId = 1; */ public long getBlockId() { return blockId_; } /** * required uint64 blockId = 1; */ public Builder setBlockId(long value) { bitField0_ |= 0x00000001; blockId_ = value; onChanged(); return this; } /** * required uint64 blockId = 1; */ public Builder clearBlockId() { bitField0_ = (bitField0_ & ~0x00000001); blockId_ = 0L; onChanged(); return this; } // required uint64 genStamp = 2; private long genStamp_ ; /** * required uint64 genStamp = 2; */ public boolean hasGenStamp() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 genStamp = 2; */ public long getGenStamp() { return genStamp_; } /** * required uint64 genStamp = 2; */ public Builder setGenStamp(long value) { bitField0_ |= 0x00000002; genStamp_ = value; onChanged(); return this; } /** * required uint64 genStamp = 2; */ public Builder clearGenStamp() { bitField0_ = (bitField0_ & ~0x00000002); genStamp_ = 0L; onChanged(); return this; } // optional uint64 numBytes = 3 [default = 0]; private long numBytes_ ; /** * optional uint64 numBytes = 3 [default = 0]; */ public boolean hasNumBytes() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 numBytes = 3 [default = 0]; */ public long getNumBytes() { return numBytes_; } /** * optional uint64 numBytes = 3 [default = 0]; */ public Builder setNumBytes(long value) { bitField0_ |= 0x00000004; numBytes_ = value; onChanged(); return this; } /** * optional uint64 numBytes = 3 [default = 0]; */ public Builder clearNumBytes() { bitField0_ = (bitField0_ & ~0x00000004); numBytes_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockProto) } static { defaultInstance = new BlockProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockProto) } public interface BlockWithLocationsProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.BlockProto block = 1; /** * required .hadoop.hdfs.BlockProto block = 1; * *
     * Block
     * 
*/ boolean hasBlock(); /** * required .hadoop.hdfs.BlockProto block = 1; * *
     * Block
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock(); /** * required .hadoop.hdfs.BlockProto block = 1; * *
     * Block
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder(); // repeated string datanodeUuids = 2; /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ java.util.List getDatanodeUuidsList(); /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ int getDatanodeUuidsCount(); /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ java.lang.String getDatanodeUuids(int index); /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ com.google.protobuf.ByteString getDatanodeUuidsBytes(int index); // repeated string storageUuids = 3; /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ java.util.List getStorageUuidsList(); /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ int getStorageUuidsCount(); /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ java.lang.String getStorageUuids(int index); /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ com.google.protobuf.ByteString getStorageUuidsBytes(int index); // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ java.util.List getStorageTypesList(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ int getStorageTypesCount(); /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index); } /** * Protobuf type {@code hadoop.hdfs.BlockWithLocationsProto} * *
   **
    * Block and the datanodes on which it is located
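    *
    * A construction sketch exercising the repeated fields (illustrative only;
    * the UUID strings are hypothetical, and addStorageUuids/addStorageTypes
    * are the usual generated repeated-field accessors, with DISK taken from
    * the StorageTypeProto enum):
    *
    *   BlockWithLocationsProto blk = BlockWithLocationsProto.newBuilder()
    *       .setBlock(block)                         // required BlockProto
    *       .addDatanodeUuids("dn-uuid-1")           // repeated string datanodeUuids = 2
    *       .addStorageUuids("storage-uuid-1")       // repeated string storageUuids = 3
    *       .addStorageTypes(StorageTypeProto.DISK)  // repeated enum storageTypes = 4
    *       .build();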
   * 
*/ public static final class BlockWithLocationsProto extends com.google.protobuf.GeneratedMessage implements BlockWithLocationsProtoOrBuilder { // Use BlockWithLocationsProto.newBuilder() to construct. private BlockWithLocationsProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private BlockWithLocationsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final BlockWithLocationsProto defaultInstance; public static BlockWithLocationsProto getDefaultInstance() { return defaultInstance; } public BlockWithLocationsProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockWithLocationsProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = block_.toBuilder(); } block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { datanodeUuids_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000002; } datanodeUuids_.add(input.readBytes()); break; } case 26: { if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { storageUuids_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000004; } storageUuids_.add(input.readBytes()); break; } case 32: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(4, rawValue); } else { if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } storageTypes_.add(value); } break; } case 34: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(4, rawValue); } else { if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { storageTypes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } storageTypes_.add(value); } } input.popLimit(oldLimit); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { datanodeUuids_ = new com.google.protobuf.UnmodifiableLazyStringList(datanodeUuids_); } if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { storageUuids_ = new com.google.protobuf.UnmodifiableLazyStringList(storageUuids_); } if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public BlockWithLocationsProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new BlockWithLocationsProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.BlockProto block = 1; public static final int BLOCK_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_; /** * required .hadoop.hdfs.BlockProto block = 1; * *
     * Block
     * 
*/ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.BlockProto block = 1; * *
     * Block
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { return block_; } /** * required .hadoop.hdfs.BlockProto block = 1; * *
     * Block
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { return block_; } // repeated string datanodeUuids = 2; public static final int DATANODEUUIDS_FIELD_NUMBER = 2; private com.google.protobuf.LazyStringList datanodeUuids_; /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ public java.util.List getDatanodeUuidsList() { return datanodeUuids_; } /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ public int getDatanodeUuidsCount() { return datanodeUuids_.size(); } /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ public java.lang.String getDatanodeUuids(int index) { return datanodeUuids_.get(index); } /** * repeated string datanodeUuids = 2; * *
     * Datanodes with replicas of the block
     * 
*/ public com.google.protobuf.ByteString getDatanodeUuidsBytes(int index) { return datanodeUuids_.getByteString(index); } // repeated string storageUuids = 3; public static final int STORAGEUUIDS_FIELD_NUMBER = 3; private com.google.protobuf.LazyStringList storageUuids_; /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ public java.util.List getStorageUuidsList() { return storageUuids_; } /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 3; * *
     * Storages with replicas of the block
     * 
*/ public com.google.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; public static final int STORAGETYPES_FIELD_NUMBER = 4; private java.util.List storageTypes_; /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public java.util.List getStorageTypesList() { return storageTypes_; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_.get(index); } private void initFields() { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); datanodeUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; storageTypes_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlock()) { memoizedIsInitialized = 0; return false; } if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, block_); } for (int i = 0; i < datanodeUuids_.size(); i++) { output.writeBytes(2, datanodeUuids_.getByteString(i)); } for (int i = 0; i < storageUuids_.size(); i++) { output.writeBytes(3, storageUuids_.getByteString(i)); } for (int i = 0; i < storageTypes_.size(); i++) { output.writeEnum(4, storageTypes_.get(i).getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, block_); } { int dataSize = 0; for (int i = 0; i < datanodeUuids_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(datanodeUuids_.getByteString(i)); } size += dataSize; size += 1 * getDatanodeUuidsList().size(); } { int dataSize = 0; for (int i = 0; i < storageUuids_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(storageUuids_.getByteString(i)); } size += dataSize; size += 1 * getStorageUuidsList().size(); } { int dataSize = 0; for (int i = 0; i < storageTypes_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(storageTypes_.get(i).getNumber()); } size += dataSize; size += 1 * storageTypes_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto other = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) obj; boolean result = true; result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && getDatanodeUuidsList() .equals(other.getDatanodeUuidsList()); result = result && getStorageUuidsList() .equals(other.getStorageUuidsList()); result = result && getStorageTypesList() .equals(other.getStorageTypesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } if (getDatanodeUuidsCount() > 0) { hash = (37 * hash) + DATANODEUUIDS_FIELD_NUMBER; hash = (53 * hash) + getDatanodeUuidsList().hashCode(); } if (getStorageUuidsCount() > 0) { hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageUuidsList().hashCode(); } if (getStorageTypesCount() > 0) { hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getStorageTypesList()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return 
PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.BlockWithLocationsProto} * *
     **
      * Block and the datanodes on which it is located
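      *
      * Modification sketch via toBuilder() (illustrative only; assumes 'blk'
      * is an existing BlockWithLocationsProto):
      *
      *   BlockWithLocationsProto updated = blk.toBuilder()
      *       .addDatanodeUuids("dn-uuid-2")   // repeated fields append, never replace
      *       .build();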
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); datanodeUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } if (((bitField0_ & 0x00000002) == 0x00000002)) { datanodeUuids_ = new com.google.protobuf.UnmodifiableLazyStringList( datanodeUuids_); bitField0_ = (bitField0_ & ~0x00000002); } result.datanodeUuids_ = datanodeUuids_; if (((bitField0_ & 0x00000004) == 0x00000004)) { storageUuids_ = new com.google.protobuf.UnmodifiableLazyStringList( storageUuids_); bitField0_ = (bitField0_ & ~0x00000004); } result.storageUuids_ 
= storageUuids_; if (((bitField0_ & 0x00000008) == 0x00000008)) { storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_); bitField0_ = (bitField0_ & ~0x00000008); } result.storageTypes_ = storageTypes_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()) return this; if (other.hasBlock()) { mergeBlock(other.getBlock()); } if (!other.datanodeUuids_.isEmpty()) { if (datanodeUuids_.isEmpty()) { datanodeUuids_ = other.datanodeUuids_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureDatanodeUuidsIsMutable(); datanodeUuids_.addAll(other.datanodeUuids_); } onChanged(); } if (!other.storageUuids_.isEmpty()) { if (storageUuids_.isEmpty()) { storageUuids_ = other.storageUuids_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureStorageUuidsIsMutable(); storageUuids_.addAll(other.storageUuids_); } onChanged(); } if (!other.storageTypes_.isEmpty()) { if (storageTypes_.isEmpty()) { storageTypes_ = other.storageTypes_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureStorageTypesIsMutable(); storageTypes_.addAll(other.storageTypes_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlock()) { return false; } if (!getBlock().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.BlockProto block = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_; /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * required .hadoop.hdfs.BlockProto block = 1; * *
       * Block
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // repeated string datanodeUuids = 2; private com.google.protobuf.LazyStringList datanodeUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureDatanodeUuidsIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { datanodeUuids_ = new com.google.protobuf.LazyStringArrayList(datanodeUuids_); bitField0_ |= 0x00000002; } } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
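        *
        * Illustrative sketch (editor's note): repeated string fields expose
        * list-style getters plus add/set/clear mutators on the builder; the
        * UUID strings below are hypothetical.
        *
        *   b.addDatanodeUuids("dn-uuid-1")
        *    .addDatanodeUuids("dn-uuid-2");
        *   int replicas = b.getDatanodeUuidsCount();  // 2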
*/ public java.util.List getDatanodeUuidsList() { return java.util.Collections.unmodifiableList(datanodeUuids_); } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public int getDatanodeUuidsCount() { return datanodeUuids_.size(); } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public java.lang.String getDatanodeUuids(int index) { return datanodeUuids_.get(index); } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public com.google.protobuf.ByteString getDatanodeUuidsBytes(int index) { return datanodeUuids_.getByteString(index); } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public Builder setDatanodeUuids( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureDatanodeUuidsIsMutable(); datanodeUuids_.set(index, value); onChanged(); return this; } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public Builder addDatanodeUuids( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureDatanodeUuidsIsMutable(); datanodeUuids_.add(value); onChanged(); return this; } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public Builder addAllDatanodeUuids( java.lang.Iterable values) { ensureDatanodeUuidsIsMutable(); super.addAll(values, datanodeUuids_); onChanged(); return this; } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public Builder clearDatanodeUuids() { datanodeUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * repeated string datanodeUuids = 2; * *
       * Datanodes with replicas of the block
       * 
*/ public Builder addDatanodeUuidsBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureDatanodeUuidsIsMutable(); datanodeUuids_.add(value); onChanged(); return this; } // repeated string storageUuids = 3; private com.google.protobuf.LazyStringList storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageUuidsIsMutable() { if (!((bitField0_ & 0x00000004) == 0x00000004)) { storageUuids_ = new com.google.protobuf.LazyStringArrayList(storageUuids_); bitField0_ |= 0x00000004; } } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public java.util.List getStorageUuidsList() { return java.util.Collections.unmodifiableList(storageUuids_); } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public int getStorageUuidsCount() { return storageUuids_.size(); } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public java.lang.String getStorageUuids(int index) { return storageUuids_.get(index); } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public com.google.protobuf.ByteString getStorageUuidsBytes(int index) { return storageUuids_.getByteString(index); } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public Builder setStorageUuids( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.set(index, value); onChanged(); return this; } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public Builder addStorageUuids( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public Builder addAllStorageUuids( java.lang.Iterable values) { ensureStorageUuidsIsMutable(); super.addAll(values, storageUuids_); onChanged(); return this; } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
*/ public Builder clearStorageUuids() { storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * repeated string storageUuids = 3; * *
       * Storages with replicas of the block
       * 
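        *
        * Illustrative sketch (editor's note): the ...Bytes variant takes an
        * already-encoded UTF-8 ByteString, while the parallel storageTypes
        * list in the next field takes StorageTypeProto values. The UUID
        * string is hypothetical.
        *
        *   b.addStorageUuidsBytes(
        *       com.google.protobuf.ByteString.copyFromUtf8("storage-uuid-1"));
        *   b.addStorageTypes(HdfsProtos.StorageTypeProto.DISK);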
*/ public Builder addStorageUuidsBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageUuidsIsMutable(); storageUuids_.add(value); onChanged(); return this; } // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; private java.util.List storageTypes_ = java.util.Collections.emptyList(); private void ensureStorageTypesIsMutable() { if (!((bitField0_ & 0x00000008) == 0x00000008)) { storageTypes_ = new java.util.ArrayList(storageTypes_); bitField0_ |= 0x00000008; } } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public java.util.List getStorageTypesList() { return java.util.Collections.unmodifiableList(storageTypes_); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public int getStorageTypesCount() { return storageTypes_.size(); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) { return storageTypes_.get(index); } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public Builder setStorageTypes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureStorageTypesIsMutable(); storageTypes_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public Builder addAllStorageTypes( java.lang.Iterable values) { ensureStorageTypesIsMutable(); super.addAll(values, storageTypes_); onChanged(); return this; } /** * repeated .hadoop.hdfs.StorageTypeProto storageTypes = 4; */ public Builder clearStorageTypes() { storageTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockWithLocationsProto) } static { defaultInstance = new BlockWithLocationsProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockWithLocationsProto) } public interface BlocksWithLocationsProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ java.util.List getBlocksList(); /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index); /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ int getBlocksCount(); /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ java.util.List getBlocksOrBuilderList(); /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.BlocksWithLocationsProto} * *
   **
    * List of blocks with locations
   * 
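    *
    * Illustrative sketch (editor's note, not part of the generated source):
    * assembling the wrapper message and round-tripping it through the wire
    * format; blockWithLocations is a hypothetical prebuilt entry.
    *
    *   HdfsProtos.BlocksWithLocationsProto blocks =
    *       HdfsProtos.BlocksWithLocationsProto.newBuilder()
    *           .addBlocks(blockWithLocations)
    *           .build();
    *   byte[] wire = blocks.toByteArray();
    *   HdfsProtos.BlocksWithLocationsProto parsed =          // may throw
    *       HdfsProtos.BlocksWithLocationsProto.parseFrom(wire);  // InvalidProtocolBufferException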
*/ public static final class BlocksWithLocationsProto extends com.google.protobuf.GeneratedMessage implements BlocksWithLocationsProtoOrBuilder { // Use BlocksWithLocationsProto.newBuilder() to construct. private BlocksWithLocationsProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private BlocksWithLocationsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final BlocksWithLocationsProto defaultInstance; public static BlocksWithLocationsProto getDefaultInstance() { return defaultInstance; } public BlocksWithLocationsProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlocksWithLocationsProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public BlocksWithLocationsProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new BlocksWithLocationsProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; public static final int BLOCKS_FIELD_NUMBER = 1; private java.util.List blocks_; /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public 
java.util.List getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public java.util.List getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index) { return blocks_.get(index); } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } private void initFields() { blocks_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(1, blocks_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < blocks_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, blocks_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) obj; boolean result = true; result = result && getBlocksList() .equals(other.getBlocksList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.BlocksWithLocationsProto} * *
     **
      * List of blocks with locations
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlocksFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { blocksBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto(this); int from_bitField0_ = bitField0_; if (blocksBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & ~0x00000001); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance()) return this; if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = 
(bitField0_ & ~0x00000001); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000001); blocksBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; private java.util.List blocks_ = java.util.Collections.emptyList(); private void ensureBlocksIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = new java.util.ArrayList(blocks_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> blocksBuilder_; /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public java.util.List getBlocksList() { if (blocksBuilder_ == null) { return java.util.Collections.unmodifiableList(blocks_); } else { return blocksBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public int getBlocksCount() { if (blocksBuilder_ == null) { return blocks_.size(); } else { return blocksBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.set(index, value); onChanged(); } else { blocksBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.set(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder 
addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(value); onChanged(); } else { blocksBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(index, value); onChanged(); } else { blocksBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder addBlocks( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder addAllBlocks( java.lang.Iterable values) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); super.addAll(values, blocks_); onChanged(); } else { blocksBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder clearBlocks() { if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { blocksBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public Builder removeBlocks(int index) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.remove(index); onChanged(); } else { blocksBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder getBlocksBuilder( int index) { return getBlocksFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public java.util.List getBlocksOrBuilderList() { if (blocksBuilder_ != null) { return blocksBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blocks_); } } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder addBlocksBuilder() { return getBlocksFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder addBlocksBuilder( int index) { return getBlocksFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockWithLocationsProto blocks = 1; */ public java.util.List getBlocksBuilderList() { return getBlocksFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> getBlocksFieldBuilder() { if (blocksBuilder_ == null) { blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder>( blocks_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); blocks_ = null; } return blocksBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlocksWithLocationsProto) } static { defaultInstance = new BlocksWithLocationsProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlocksWithLocationsProto) } public interface RemoteEditLogProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 startTxId = 1; /** * required uint64 startTxId = 1; * *
     * Starting available edit log transaction
     * 
*/ boolean hasStartTxId(); /** * required uint64 startTxId = 1; * *
     * Starting available edit log transaction
     * 
*/ long getStartTxId(); // required uint64 endTxId = 2; /** * required uint64 endTxId = 2; * *
     * Ending available edit log transaction
     * 
*/ boolean hasEndTxId(); /** * required uint64 endTxId = 2; * *
     * Ending available edit log transaction
     * 
*/ long getEndTxId(); // optional bool isInProgress = 3 [default = false]; /** * optional bool isInProgress = 3 [default = false]; */ boolean hasIsInProgress(); /** * optional bool isInProgress = 3 [default = false]; */ boolean getIsInProgress(); } /** * Protobuf type {@code hadoop.hdfs.RemoteEditLogProto} * *
   **
    * Edit log information with available transactions
   * 
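    *
    * Illustrative sketch (editor's note): startTxId and endTxId are required,
    * so build() throws UninitializedMessageException if either is missing;
    * isInProgress defaults to false. Transaction ids are hypothetical.
    *
    *   HdfsProtos.RemoteEditLogProto log =
    *       HdfsProtos.RemoteEditLogProto.newBuilder()
    *           .setStartTxId(1L)
    *           .setEndTxId(100L)
    *           .build();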
*/ public static final class RemoteEditLogProto extends com.google.protobuf.GeneratedMessage implements RemoteEditLogProtoOrBuilder { // Use RemoteEditLogProto.newBuilder() to construct. private RemoteEditLogProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RemoteEditLogProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RemoteEditLogProto defaultInstance; public static RemoteEditLogProto getDefaultInstance() { return defaultInstance; } public RemoteEditLogProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoteEditLogProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; startTxId_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; endTxId_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; isInProgress_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public RemoteEditLogProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RemoteEditLogProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 startTxId = 1; public static final int STARTTXID_FIELD_NUMBER = 1; private long startTxId_; /** * required uint64 startTxId = 1; * *
     * Starting available edit log transaction
     * 
*/ public boolean hasStartTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 startTxId = 1; * *
     * Starting available edit log transaction
     * 
*/ public long getStartTxId() { return startTxId_; } // required uint64 endTxId = 2; public static final int ENDTXID_FIELD_NUMBER = 2; private long endTxId_; /** * required uint64 endTxId = 2; * *
     * Ending available edit log transaction
     * 
*/ public boolean hasEndTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 endTxId = 2; * *
     * Ending available edit log transaction
     * 
*/ public long getEndTxId() { return endTxId_; } // optional bool isInProgress = 3 [default = false]; public static final int ISINPROGRESS_FIELD_NUMBER = 3; private boolean isInProgress_; /** * optional bool isInProgress = 3 [default = false]; */ public boolean hasIsInProgress() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bool isInProgress = 3 [default = false]; */ public boolean getIsInProgress() { return isInProgress_; } private void initFields() { startTxId_ = 0L; endTxId_ = 0L; isInProgress_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasStartTxId()) { memoizedIsInitialized = 0; return false; } if (!hasEndTxId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, startTxId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, endTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(3, isInProgress_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, startTxId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, endTxId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(3, isInProgress_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) obj; boolean result = true; result = result && (hasStartTxId() == other.hasStartTxId()); if (hasStartTxId()) { result = result && (getStartTxId() == other.getStartTxId()); } result = result && (hasEndTxId() == other.hasEndTxId()); if (hasEndTxId()) { result = result && (getEndTxId() == other.getEndTxId()); } result = result && (hasIsInProgress() == other.hasIsInProgress()); if (hasIsInProgress()) { result = result && (getIsInProgress() == other.getIsInProgress()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStartTxId()) { hash = (37 * hash) + STARTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStartTxId()); } if (hasEndTxId()) { hash = (37 * hash) + ENDTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getEndTxId()); } if (hasIsInProgress()) { hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER; 
hash = (53 * hash) + hashBoolean(getIsInProgress()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoteEditLogProto} * *
     **
      * Edit log information with available transactions
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); startTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); endTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); isInProgress_ = false; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.startTxId_ = startTxId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.endTxId_ = endTxId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.isInProgress_ = isInProgress_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()) return this; if (other.hasStartTxId()) { setStartTxId(other.getStartTxId()); } if 
(other.hasEndTxId()) { setEndTxId(other.getEndTxId()); } if (other.hasIsInProgress()) { setIsInProgress(other.getIsInProgress()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasStartTxId()) { return false; } if (!hasEndTxId()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 startTxId = 1; private long startTxId_ ; /** * required uint64 startTxId = 1; * *
       * Starting available edit log transaction
       * 
*/ public boolean hasStartTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 startTxId = 1; * *
       * Starting available edit log transaction
       * 
*/ public long getStartTxId() { return startTxId_; } /** * required uint64 startTxId = 1; * *
       * Starting available edit log transaction
       * 
*/ public Builder setStartTxId(long value) { bitField0_ |= 0x00000001; startTxId_ = value; onChanged(); return this; } /** * required uint64 startTxId = 1; * *
       * Starting available edit log transaction
       * 
*/ public Builder clearStartTxId() { bitField0_ = (bitField0_ & ~0x00000001); startTxId_ = 0L; onChanged(); return this; } // required uint64 endTxId = 2; private long endTxId_ ; /** * required uint64 endTxId = 2; * *
       * Ending available edit log transaction
       * 
*/ public boolean hasEndTxId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 endTxId = 2; * *
       * Ending available edit log transaction
       * 
*/ public long getEndTxId() { return endTxId_; } /** * required uint64 endTxId = 2; * *
       * Ending available edit log transaction
       * 
*/ public Builder setEndTxId(long value) { bitField0_ |= 0x00000002; endTxId_ = value; onChanged(); return this; } /** * required uint64 endTxId = 2; * *
       * Ending available edit log transaction
       * 
*/ public Builder clearEndTxId() { bitField0_ = (bitField0_ & ~0x00000002); endTxId_ = 0L; onChanged(); return this; } // optional bool isInProgress = 3 [default = false]; private boolean isInProgress_ ; /** * optional bool isInProgress = 3 [default = false]; */ public boolean hasIsInProgress() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bool isInProgress = 3 [default = false]; */ public boolean getIsInProgress() { return isInProgress_; } /** * optional bool isInProgress = 3 [default = false]; */ public Builder setIsInProgress(boolean value) { bitField0_ |= 0x00000004; isInProgress_ = value; onChanged(); return this; } /** * optional bool isInProgress = 3 [default = false]; */ public Builder clearIsInProgress() { bitField0_ = (bitField0_ & ~0x00000004); isInProgress_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteEditLogProto) } static { defaultInstance = new RemoteEditLogProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteEditLogProto) } public interface RemoteEditLogManifestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ java.util.List getLogsList(); /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index); /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ int getLogsCount(); /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ java.util.List getLogsOrBuilderList(); /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.RemoteEditLogManifestProto} * *
   **
    * Enumeration of edit logs available on a remote namenode
   * 
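    *
    * Illustrative sketch (editor's note): the manifest simply aggregates
    * RemoteEditLogProto entries; log is a hypothetical prebuilt instance.
    *
    *   HdfsProtos.RemoteEditLogManifestProto manifest =
    *       HdfsProtos.RemoteEditLogManifestProto.newBuilder()
    *           .addLogs(log)
    *           .build();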
*/ public static final class RemoteEditLogManifestProto extends com.google.protobuf.GeneratedMessage implements RemoteEditLogManifestProtoOrBuilder { // Use RemoteEditLogManifestProto.newBuilder() to construct. private RemoteEditLogManifestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RemoteEditLogManifestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RemoteEditLogManifestProto defaultInstance; public static RemoteEditLogManifestProto getDefaultInstance() { return defaultInstance; } public RemoteEditLogManifestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoteEditLogManifestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { logs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } logs_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { logs_ = java.util.Collections.unmodifiableList(logs_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public RemoteEditLogManifestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RemoteEditLogManifestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; public static final int LOGS_FIELD_NUMBER = 1; private java.util.List logs_; /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public 
java.util.List getLogsList() { return logs_; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public java.util.List getLogsOrBuilderList() { return logs_; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public int getLogsCount() { return logs_.size(); } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index) { return logs_.get(index); } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( int index) { return logs_.get(index); } private void initFields() { logs_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getLogsCount(); i++) { if (!getLogs(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < logs_.size(); i++) { output.writeMessage(1, logs_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < logs_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, logs_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) obj; boolean result = true; result = result && getLogsList() .equals(other.getLogsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getLogsCount() > 0) { hash = (37 * hash) + LOGS_FIELD_NUMBER; hash = (53 * hash) + getLogsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoteEditLogManifestProto} * *
     **
     * Enumeration of editlogs available on a remote namenode
     * 
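     *
     * Usage sketch (editor's note, not generated code): building a manifest
     * and round-tripping it through the wire format. The field names on
     * RemoteEditLogProto (startTxId, endTxId) are as declared in hdfs.proto;
     * everything else is visible in this class.
     *
     *   RemoteEditLogManifestProto manifest =
     *       RemoteEditLogManifestProto.newBuilder()
     *           .addLogs(RemoteEditLogProto.newBuilder()
     *               .setStartTxId(1L)
     *               .setEndTxId(100L))
     *           .build();
     *   byte[] wire = manifest.toByteArray();
     *   RemoteEditLogManifestProto parsed =
     *       RemoteEditLogManifestProto.parseFrom(wire);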
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getLogsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (logsBuilder_ == null) { logs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { logsBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto(this); int from_bitField0_ = bitField0_; if (logsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { logs_ = java.util.Collections.unmodifiableList(logs_); bitField0_ = (bitField0_ & ~0x00000001); } result.logs_ = logs_; } else { result.logs_ = logsBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) return this; if (logsBuilder_ == null) { if (!other.logs_.isEmpty()) { if (logs_.isEmpty()) { logs_ = other.logs_; bitField0_ = 
(bitField0_ & ~0x00000001); } else { ensureLogsIsMutable(); logs_.addAll(other.logs_); } onChanged(); } } else { if (!other.logs_.isEmpty()) { if (logsBuilder_.isEmpty()) { logsBuilder_.dispose(); logsBuilder_ = null; logs_ = other.logs_; bitField0_ = (bitField0_ & ~0x00000001); logsBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getLogsFieldBuilder() : null; } else { logsBuilder_.addAllMessages(other.logs_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getLogsCount(); i++) { if (!getLogs(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; private java.util.List logs_ = java.util.Collections.emptyList(); private void ensureLogsIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { logs_ = new java.util.ArrayList(logs_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder> logsBuilder_; /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public java.util.List getLogsList() { if (logsBuilder_ == null) { return java.util.Collections.unmodifiableList(logs_); } else { return logsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public int getLogsCount() { if (logsBuilder_ == null) { return logs_.size(); } else { return logsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index) { if (logsBuilder_ == null) { return logs_.get(index); } else { return logsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder setLogs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { if (logsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLogsIsMutable(); logs_.set(index, value); onChanged(); } else { logsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder setLogs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { if (logsBuilder_ == null) { ensureLogsIsMutable(); logs_.set(index, builderForValue.build()); onChanged(); } else { logsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder addLogs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { if (logsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } 
ensureLogsIsMutable(); logs_.add(value); onChanged(); } else { logsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder addLogs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { if (logsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLogsIsMutable(); logs_.add(index, value); onChanged(); } else { logsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder addLogs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { if (logsBuilder_ == null) { ensureLogsIsMutable(); logs_.add(builderForValue.build()); onChanged(); } else { logsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder addLogs( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { if (logsBuilder_ == null) { ensureLogsIsMutable(); logs_.add(index, builderForValue.build()); onChanged(); } else { logsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder addAllLogs( java.lang.Iterable values) { if (logsBuilder_ == null) { ensureLogsIsMutable(); super.addAll(values, logs_); onChanged(); } else { logsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder clearLogs() { if (logsBuilder_ == null) { logs_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { logsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public Builder removeLogs(int index) { if (logsBuilder_ == null) { ensureLogsIsMutable(); logs_.remove(index); onChanged(); } else { logsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder getLogsBuilder( int index) { return getLogsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( int index) { if (logsBuilder_ == null) { return logs_.get(index); } else { return logsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public java.util.List getLogsOrBuilderList() { if (logsBuilder_ != null) { return logsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(logs_); } } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder addLogsBuilder() { return getLogsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder addLogsBuilder( int index) { return getLogsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.RemoteEditLogProto logs = 1; */ public java.util.List getLogsBuilderList() { return getLogsFieldBuilder().getBuilderList(); } private 
com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder> getLogsFieldBuilder() { if (logsBuilder_ == null) { logsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder>( logs_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); logs_ = null; } return logsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteEditLogManifestProto) } static { defaultInstance = new RemoteEditLogManifestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteEditLogManifestProto) } public interface NamespaceInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string buildVersion = 1; /** * required string buildVersion = 1; * *
     * Software revision version (e.g. an svn or git revision)
     * 
*/ boolean hasBuildVersion(); /** * required string buildVersion = 1; * *
     * Software revision version (e.g. an svn or git revision)
     * 
*/ java.lang.String getBuildVersion(); /** * required string buildVersion = 1; * *
     * Software revision version (e.g. an svn or git revision)
     * 
*/ com.google.protobuf.ByteString getBuildVersionBytes(); // required uint32 unused = 2; /** * required uint32 unused = 2; * *
     * Retained for backward compatibility
     * 
*/ boolean hasUnused(); /** * required uint32 unused = 2; * *
     * Retained for backward compatibility
     * 
*/ int getUnused(); // required string blockPoolID = 3; /** * required string blockPoolID = 3; * *
     * block pool used by the namespace
     * 
*/ boolean hasBlockPoolID(); /** * required string blockPoolID = 3; * *
     * block pool used by the namespace
     * 
*/ java.lang.String getBlockPoolID(); /** * required string blockPoolID = 3; * *
     * block pool used by the namespace
     * 
*/ com.google.protobuf.ByteString getBlockPoolIDBytes(); // required .hadoop.hdfs.StorageInfoProto storageInfo = 4; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
     * Node information
     * 
*/ boolean hasStorageInfo(); /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
     * Node information
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
     * Node information
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); // required string softwareVersion = 5; /** * required string softwareVersion = 5; * *
     * Software version number (e.g. 2.0.0)
     * 
*/ boolean hasSoftwareVersion(); /** * required string softwareVersion = 5; * *
     * Software version number (e.g. 2.0.0)
     * 
*/ java.lang.String getSoftwareVersion(); /** * required string softwareVersion = 5; * *
     * Software version number (e.g. 2.0.0)
     * 
*/ com.google.protobuf.ByteString getSoftwareVersionBytes(); // optional uint64 capabilities = 6 [default = 0]; /** * optional uint64 capabilities = 6 [default = 0]; * *
     * feature flags
     * 
*/ boolean hasCapabilities(); /** * optional uint64 capabilities = 6 [default = 0]; * *
     * feature flags
     * 
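     *
     * Editor's note: capabilities is a uint64 bit mask of feature flags, so a
     * caller would test for a feature with a mask constant (CAP_X below is
     * hypothetical):
     *   boolean supported = (info.getCapabilities() & CAP_X) != 0;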
*/ long getCapabilities(); } /** * Protobuf type {@code hadoop.hdfs.NamespaceInfoProto} * *
   **
   * Namespace information that describes a namespace on a namenode
   * 
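   *
   * Usage sketch (editor's note, not generated code): all five required
   * fields must be set, and storageInfo must itself be fully initialized,
   * otherwise build() throws an UninitializedMessageException.
   *
   *   StorageInfoProto storage = ...;  // assume an initialized StorageInfoProto
   *   NamespaceInfoProto info = NamespaceInfoProto.newBuilder()
   *       .setBuildVersion("f00dcafe")           // e.g. a git revision
   *       .setUnused(0)
   *       .setBlockPoolID("BP-1-127.0.0.1-1000")
   *       .setStorageInfo(storage)
   *       .setSoftwareVersion("2.0.0")
   *       .build();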
*/ public static final class NamespaceInfoProto extends com.google.protobuf.GeneratedMessage implements NamespaceInfoProtoOrBuilder { // Use NamespaceInfoProto.newBuilder() to construct. private NamespaceInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private NamespaceInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final NamespaceInfoProto defaultInstance; public static NamespaceInfoProto getDefaultInstance() { return defaultInstance; } public NamespaceInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NamespaceInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; buildVersion_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; unused_ = input.readUInt32(); break; } case 26: { bitField0_ |= 0x00000004; blockPoolID_ = input.readBytes(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = storageInfo_.toBuilder(); } storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storageInfo_); storageInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { bitField0_ |= 0x00000010; softwareVersion_ = input.readBytes(); break; } case 48: { bitField0_ |= 0x00000020; capabilities_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public NamespaceInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return 
new NamespaceInfoProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string buildVersion = 1; public static final int BUILDVERSION_FIELD_NUMBER = 1; private java.lang.Object buildVersion_; /** * required string buildVersion = 1; * *
     * Software revision version (e.g. an svn or git revision)
     * 
*/ public boolean hasBuildVersion() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string buildVersion = 1; * *
     * Software revision version (e.g. an svn or git revision)
     * 
*/ public java.lang.String getBuildVersion() { java.lang.Object ref = buildVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { buildVersion_ = s; } return s; } } /** * required string buildVersion = 1; * *
     * Software revision version (e.g. an svn or git revision)
     * 
*/ public com.google.protobuf.ByteString getBuildVersionBytes() { java.lang.Object ref = buildVersion_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); buildVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required uint32 unused = 2; public static final int UNUSED_FIELD_NUMBER = 2; private int unused_; /** * required uint32 unused = 2; * *
     * Retained for backward compatibility
     * 
*/ public boolean hasUnused() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 unused = 2; * *
     * Retained for backward compatibility
     * 
*/ public int getUnused() { return unused_; } // required string blockPoolID = 3; public static final int BLOCKPOOLID_FIELD_NUMBER = 3; private java.lang.Object blockPoolID_; /** * required string blockPoolID = 3; * *
     * block pool used by the namespace
     * 
*/ public boolean hasBlockPoolID() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string blockPoolID = 3; * *
     * block pool used by the namespace
     * 
*/ public java.lang.String getBlockPoolID() { java.lang.Object ref = blockPoolID_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolID_ = s; } return s; } } /** * required string blockPoolID = 3; * *
     * block pool used by the namespace
     * 
*/ public com.google.protobuf.ByteString getBlockPoolIDBytes() { java.lang.Object ref = blockPoolID_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolID_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.StorageInfoProto storageInfo = 4; public static final int STORAGEINFO_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
     * Node information
     * 
*/ public boolean hasStorageInfo() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
     * Node information
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { return storageInfo_; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
     * Node information
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { return storageInfo_; } // required string softwareVersion = 5; public static final int SOFTWAREVERSION_FIELD_NUMBER = 5; private java.lang.Object softwareVersion_; /** * required string softwareVersion = 5; * *
     * Software version number (e.g. 2.0.0)
     * 
*/ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string softwareVersion = 5; * *
     * Software version number (e.g. 2.0.0)
     * 
*/ public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { softwareVersion_ = s; } return s; } } /** * required string softwareVersion = 5; * *
     * Software version number (e.g. 2.0.0)
     * 
*/ public com.google.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // optional uint64 capabilities = 6 [default = 0]; public static final int CAPABILITIES_FIELD_NUMBER = 6; private long capabilities_; /** * optional uint64 capabilities = 6 [default = 0]; * *
     * feature flags
     * 
*/ public boolean hasCapabilities() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 capabilities = 6 [default = 0]; * *
     * feature flags
     * 
*/ public long getCapabilities() { return capabilities_; } private void initFields() { buildVersion_ = ""; unused_ = 0; blockPoolID_ = ""; storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); softwareVersion_ = ""; capabilities_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBuildVersion()) { memoizedIsInitialized = 0; return false; } if (!hasUnused()) { memoizedIsInitialized = 0; return false; } if (!hasBlockPoolID()) { memoizedIsInitialized = 0; return false; } if (!hasStorageInfo()) { memoizedIsInitialized = 0; return false; } if (!hasSoftwareVersion()) { memoizedIsInitialized = 0; return false; } if (!getStorageInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getBuildVersionBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, unused_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getBlockPoolIDBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, storageInfo_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBytes(5, getSoftwareVersionBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, capabilities_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getBuildVersionBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(2, unused_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, getBlockPoolIDBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, storageInfo_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(5, getSoftwareVersionBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, capabilities_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) obj; boolean result = true; result = result && (hasBuildVersion() == other.hasBuildVersion()); if (hasBuildVersion()) { result = result && getBuildVersion() .equals(other.getBuildVersion()); } result = result && (hasUnused() == other.hasUnused()); if (hasUnused()) { result = result && 
(getUnused() == other.getUnused()); } result = result && (hasBlockPoolID() == other.hasBlockPoolID()); if (hasBlockPoolID()) { result = result && getBlockPoolID() .equals(other.getBlockPoolID()); } result = result && (hasStorageInfo() == other.hasStorageInfo()); if (hasStorageInfo()) { result = result && getStorageInfo() .equals(other.getStorageInfo()); } result = result && (hasSoftwareVersion() == other.hasSoftwareVersion()); if (hasSoftwareVersion()) { result = result && getSoftwareVersion() .equals(other.getSoftwareVersion()); } result = result && (hasCapabilities() == other.hasCapabilities()); if (hasCapabilities()) { result = result && (getCapabilities() == other.getCapabilities()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBuildVersion()) { hash = (37 * hash) + BUILDVERSION_FIELD_NUMBER; hash = (53 * hash) + getBuildVersion().hashCode(); } if (hasUnused()) { hash = (37 * hash) + UNUSED_FIELD_NUMBER; hash = (53 * hash) + getUnused(); } if (hasBlockPoolID()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolID().hashCode(); } if (hasStorageInfo()) { hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; hash = (53 * hash) + getStorageInfo().hashCode(); } if (hasSoftwareVersion()) { hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER; hash = (53 * hash) + getSoftwareVersion().hashCode(); } if (hasCapabilities()) { hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCapabilities()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto 
parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.NamespaceInfoProto} * *
     **
     * Namespace information that describes a namespace on a namenode
     * 
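     *
     * Editor's note: the message itself is immutable, so it is edited by
     * copying it back into a builder, e.g. to flip a capability bit
     * (sketch, not generated code):
     *
     *   NamespaceInfoProto updated = info.toBuilder()
     *       .setCapabilities(info.getCapabilities() | 0x1L)
     *       .build();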
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getStorageInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); buildVersion_ = ""; bitField0_ = (bitField0_ & ~0x00000001); unused_ = 0; bitField0_ = (bitField0_ & ~0x00000002); blockPoolID_ = ""; bitField0_ = (bitField0_ & ~0x00000004); if (storageInfoBuilder_ == null) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); } else { storageInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); softwareVersion_ = ""; bitField0_ = (bitField0_ & ~0x00000010); capabilities_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.buildVersion_ = buildVersion_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.unused_ = unused_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.blockPoolID_ = blockPoolID_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (storageInfoBuilder_ == null) { result.storageInfo_ = storageInfo_; } else { result.storageInfo_ = storageInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 
0x00000010; } result.softwareVersion_ = softwareVersion_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.capabilities_ = capabilities_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) return this; if (other.hasBuildVersion()) { bitField0_ |= 0x00000001; buildVersion_ = other.buildVersion_; onChanged(); } if (other.hasUnused()) { setUnused(other.getUnused()); } if (other.hasBlockPoolID()) { bitField0_ |= 0x00000004; blockPoolID_ = other.blockPoolID_; onChanged(); } if (other.hasStorageInfo()) { mergeStorageInfo(other.getStorageInfo()); } if (other.hasSoftwareVersion()) { bitField0_ |= 0x00000010; softwareVersion_ = other.softwareVersion_; onChanged(); } if (other.hasCapabilities()) { setCapabilities(other.getCapabilities()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBuildVersion()) { return false; } if (!hasUnused()) { return false; } if (!hasBlockPoolID()) { return false; } if (!hasStorageInfo()) { return false; } if (!hasSoftwareVersion()) { return false; } if (!getStorageInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string buildVersion = 1; private java.lang.Object buildVersion_ = ""; /** * required string buildVersion = 1; * *
       * Software revision version (e.g. an svn or git revision)
       * 
*/ public boolean hasBuildVersion() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string buildVersion = 1; * *
       * Software revision version (e.g. an svn or git revision)
       * 
*/ public java.lang.String getBuildVersion() { java.lang.Object ref = buildVersion_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); buildVersion_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string buildVersion = 1; * *
       * Software revision version (e.g. an svn or git revision)
       * 
*/ public com.google.protobuf.ByteString getBuildVersionBytes() { java.lang.Object ref = buildVersion_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); buildVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string buildVersion = 1; * *
       * Software revision version (e.g. an svn or git revision)
       * 
*/ public Builder setBuildVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; buildVersion_ = value; onChanged(); return this; } /** * required string buildVersion = 1; * *
       * Software revision version (e.g. an svn or git revision)
       * 
*/ public Builder clearBuildVersion() { bitField0_ = (bitField0_ & ~0x00000001); buildVersion_ = getDefaultInstance().getBuildVersion(); onChanged(); return this; } /** * required string buildVersion = 1; * *
       * Software revision version (e.g. an svn or git revision)
       * 
*/ public Builder setBuildVersionBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; buildVersion_ = value; onChanged(); return this; } // required uint32 unused = 2; private int unused_ ; /** * required uint32 unused = 2; * *
       * Retained for backward compatibility
       * 
*/ public boolean hasUnused() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 unused = 2; * *
       * Retained for backward compatibility
       * 
*/ public int getUnused() { return unused_; } /** * required uint32 unused = 2; * *
       * Retained for backward compatibility
       * 
*/ public Builder setUnused(int value) { bitField0_ |= 0x00000002; unused_ = value; onChanged(); return this; } /** * required uint32 unused = 2; * *
       * Retained for backward compatibility
       * 
*/ public Builder clearUnused() { bitField0_ = (bitField0_ & ~0x00000002); unused_ = 0; onChanged(); return this; } // required string blockPoolID = 3; private java.lang.Object blockPoolID_ = ""; /** * required string blockPoolID = 3; * *
       * block pool used by the namespace
       * 
*/ public boolean hasBlockPoolID() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string blockPoolID = 3; * *
       * block pool used by the namespace
       * 
*/ public java.lang.String getBlockPoolID() { java.lang.Object ref = blockPoolID_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); blockPoolID_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolID = 3; * *
       * block pool used by the namespace
       * 
*/ public com.google.protobuf.ByteString getBlockPoolIDBytes() { java.lang.Object ref = blockPoolID_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolID_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string blockPoolID = 3; * *
       * block pool used by the namespace
       * 
*/ public Builder setBlockPoolID( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; blockPoolID_ = value; onChanged(); return this; } /** * required string blockPoolID = 3; * *
       * block pool used by the namespace
       * 
*/ public Builder clearBlockPoolID() { bitField0_ = (bitField0_ & ~0x00000004); blockPoolID_ = getDefaultInstance().getBlockPoolID(); onChanged(); return this; } /** * required string blockPoolID = 3; * *
       * block pool used by the namespace
       * 
*/ public Builder setBlockPoolIDBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; blockPoolID_ = value; onChanged(); return this; } // required .hadoop.hdfs.StorageInfoProto storageInfo = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public boolean hasStorageInfo() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { if (storageInfoBuilder_ == null) { return storageInfo_; } else { return storageInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { if (storageInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storageInfo_ = value; onChanged(); } else { storageInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public Builder setStorageInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { if (storageInfoBuilder_ == null) { storageInfo_ = builderForValue.build(); onChanged(); } else { storageInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
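       *
       * Editor's note: unlike setStorageInfo, this merges field-by-field into
       * any value already present (see the newBuilder(...).mergeFrom(...)
       * call below), so fields set in the current value but unset in the
       * argument survive the merge.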
*/ public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { if (storageInfoBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); } else { storageInfo_ = value; } onChanged(); } else { storageInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public Builder clearStorageInfo() { if (storageInfoBuilder_ == null) { storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { storageInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { bitField0_ |= 0x00000008; onChanged(); return getStorageInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { if (storageInfoBuilder_ != null) { return storageInfoBuilder_.getMessageOrBuilder(); } else { return storageInfo_; } } /** * required .hadoop.hdfs.StorageInfoProto storageInfo = 4; * *
       * Node information
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> getStorageInfoFieldBuilder() { if (storageInfoBuilder_ == null) { storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( storageInfo_, getParentForChildren(), isClean()); storageInfo_ = null; } return storageInfoBuilder_; } // required string softwareVersion = 5; private java.lang.Object softwareVersion_ = ""; /** * required string softwareVersion = 5; * *
       * Software version number (e.g. 2.0.0)
       * 
*/ public boolean hasSoftwareVersion() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string softwareVersion = 5; * *
       * Software version number (e.g. 2.0.0)
       * 
*/ public java.lang.String getSoftwareVersion() { java.lang.Object ref = softwareVersion_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); softwareVersion_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string softwareVersion = 5; * *
       * Software version number (e.g. 2.0.0)
       * 
*/ public com.google.protobuf.ByteString getSoftwareVersionBytes() { java.lang.Object ref = softwareVersion_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); softwareVersion_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string softwareVersion = 5; * *
       * Software version number (e.g. 2.0.0)
       * 
*/ public Builder setSoftwareVersion( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; softwareVersion_ = value; onChanged(); return this; } /** * required string softwareVersion = 5; * *
       * Software version number (e.g. 2.0.0)
       * 
*/ public Builder clearSoftwareVersion() { bitField0_ = (bitField0_ & ~0x00000010); softwareVersion_ = getDefaultInstance().getSoftwareVersion(); onChanged(); return this; } /** * required string softwareVersion = 5; * *
       * Software version number (e.g. 2.0.0)
       * 
*/ public Builder setSoftwareVersionBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; softwareVersion_ = value; onChanged(); return this; } // optional uint64 capabilities = 6 [default = 0]; private long capabilities_ ; /** * optional uint64 capabilities = 6 [default = 0]; * *
       * feature flags
       * 
*/ public boolean hasCapabilities() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 capabilities = 6 [default = 0]; * *
       * feature flags
       * 
*/ public long getCapabilities() { return capabilities_; } /** * optional uint64 capabilities = 6 [default = 0]; * *
       * feature flags
       * 
*/ public Builder setCapabilities(long value) { bitField0_ |= 0x00000020; capabilities_ = value; onChanged(); return this; } /** * optional uint64 capabilities = 6 [default = 0]; * *
       * feature flags
       * 
*/ public Builder clearCapabilities() { bitField0_ = (bitField0_ & ~0x00000020); capabilities_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamespaceInfoProto) } static { defaultInstance = new NamespaceInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.NamespaceInfoProto) } public interface BlockKeyProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 keyId = 1; /** * required uint32 keyId = 1; * *
     * Key identifier
     * 
*/ boolean hasKeyId(); /** * required uint32 keyId = 1; * *
     * Key identifier
     * 
*/ int getKeyId(); // required uint64 expiryDate = 2; /** * required uint64 expiryDate = 2; * *
     * Expiry time in milliseconds
     * 
*/ boolean hasExpiryDate(); /** * required uint64 expiryDate = 2; * *
     * Expiry time in milliseconds
     * 
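     *
     * Editor's note: presumably milliseconds since the Unix epoch, so
     *   new java.util.Date(key.getExpiryDate())
     * yields the wall-clock expiry.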
*/ long getExpiryDate(); // optional bytes keyBytes = 3; /** * optional bytes keyBytes = 3; * *
     * Key secret
     * 
*/ boolean hasKeyBytes(); /** * optional bytes keyBytes = 3; * *
     * Key secret
     * 
*/ com.google.protobuf.ByteString getKeyBytes(); } /** * Protobuf type {@code hadoop.hdfs.BlockKeyProto} * *
   **
   * Block access token information
   * 
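   *
   * Usage sketch (editor's note, not generated code): keyId and expiryDate
   * are required, keyBytes is optional; the delimited I/O methods frame each
   * message with a length prefix for streaming. Here secret is assumed to be
   * a byte[], and out/in are java.io streams.
   *
   *   BlockKeyProto key = BlockKeyProto.newBuilder()
   *       .setKeyId(42)
   *       .setExpiryDate(System.currentTimeMillis() + 600000L)
   *       .setKeyBytes(com.google.protobuf.ByteString.copyFrom(secret))
   *       .build();
   *   key.writeDelimitedTo(out);
   *   BlockKeyProto back = BlockKeyProto.parseDelimitedFrom(in);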
*/ public static final class BlockKeyProto extends com.google.protobuf.GeneratedMessage implements BlockKeyProtoOrBuilder { // Use BlockKeyProto.newBuilder() to construct. private BlockKeyProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private BlockKeyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final BlockKeyProto defaultInstance; public static BlockKeyProto getDefaultInstance() { return defaultInstance; } public BlockKeyProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BlockKeyProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; keyId_ = input.readUInt32(); break; } case 16: { bitField0_ |= 0x00000002; expiryDate_ = input.readUInt64(); break; } case 26: { bitField0_ |= 0x00000004; keyBytes_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockKeyProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public BlockKeyProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new BlockKeyProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint32 keyId = 1; public static final int KEYID_FIELD_NUMBER = 1; private int keyId_; /** * required uint32 keyId = 1; * *
     * Key identifier
     * 
*/ public boolean hasKeyId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 keyId = 1; * *
     * Key identifier
     * 
*/ public int getKeyId() { return keyId_; } // required uint64 expiryDate = 2; public static final int EXPIRYDATE_FIELD_NUMBER = 2; private long expiryDate_; /** * required uint64 expiryDate = 2; * *
     * Expiry time in milliseconds
     * 
*/ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 expiryDate = 2; * *
     * Expiry time in milliseconds
     * 
*/ public long getExpiryDate() { return expiryDate_; } // optional bytes keyBytes = 3; public static final int KEYBYTES_FIELD_NUMBER = 3; private com.google.protobuf.ByteString keyBytes_; /** * optional bytes keyBytes = 3; * *
     * Key secret
     * 
*/ public boolean hasKeyBytes() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes keyBytes = 3; * *
     * Key secret
     * 
*/ public com.google.protobuf.ByteString getKeyBytes() { return keyBytes_; } private void initFields() { keyId_ = 0; expiryDate_ = 0L; keyBytes_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasKeyId()) { memoizedIsInitialized = 0; return false; } if (!hasExpiryDate()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, keyId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, expiryDate_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, keyBytes_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, keyId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, expiryDate_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, keyBytes_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) obj; boolean result = true; result = result && (hasKeyId() == other.hasKeyId()); if (hasKeyId()) { result = result && (getKeyId() == other.getKeyId()); } result = result && (hasExpiryDate() == other.hasExpiryDate()); if (hasExpiryDate()) { result = result && (getExpiryDate() == other.getExpiryDate()); } result = result && (hasKeyBytes() == other.hasKeyBytes()); if (hasKeyBytes()) { result = result && getKeyBytes() .equals(other.getKeyBytes()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasKeyId()) { hash = (37 * hash) + KEYID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } if (hasExpiryDate()) { hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getExpiryDate()); } if (hasKeyBytes()) { hash = (37 * hash) + KEYBYTES_FIELD_NUMBER; hash = (53 * hash) + getKeyBytes().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.BlockKeyProto} * *
     **
     * Block access token information
     * 
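      *
      * A minimal round-trip sketch (editor's addition; the field values are
      * illustrative placeholders, not real HDFS data):
      *
      *   BlockKeyProto key = BlockKeyProto.newBuilder()
      *       .setKeyId(42)                              // required
      *       .setExpiryDate(System.currentTimeMillis()) // required, millis
      *       .setKeyBytes(com.google.protobuf.ByteString
      *           .copyFromUtf8("not-a-real-secret"))    // optional
      *       .build();  // throws if a required field is missing
      *   byte[] wire = key.toByteArray();
      *   BlockKeyProto parsed = BlockKeyProto.parseFrom(wire);
      *   assert key.equals(parsed);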
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockKeyProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); keyId_ = 0; bitField0_ = (bitField0_ & ~0x00000001); expiryDate_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); keyBytes_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockKeyProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.keyId_ = keyId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.expiryDate_ = expiryDate_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.keyBytes_ = keyBytes_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()) return this; if (other.hasKeyId()) { setKeyId(other.getKeyId()); } if (other.hasExpiryDate()) { setExpiryDate(other.getExpiryDate()); } if (other.hasKeyBytes()) { 
setKeyBytes(other.getKeyBytes()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasKeyId()) { return false; } if (!hasExpiryDate()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 keyId = 1; private int keyId_ ; /** * required uint32 keyId = 1; * *
       * Key identifier
       * 
*/ public boolean hasKeyId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint32 keyId = 1; * *
       * Key identifier
       * 
*/ public int getKeyId() { return keyId_; } /** * required uint32 keyId = 1; * *
       * Key identifier
       * 
*/ public Builder setKeyId(int value) { bitField0_ |= 0x00000001; keyId_ = value; onChanged(); return this; } /** * required uint32 keyId = 1; * *
       * Key identifier
       * 
*/ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000001); keyId_ = 0; onChanged(); return this; } // required uint64 expiryDate = 2; private long expiryDate_ ; /** * required uint64 expiryDate = 2; * *
       * Expiry time in milliseconds
       * 
*/ public boolean hasExpiryDate() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 expiryDate = 2; * *
       * Expiry time in milliseconds
       * 
*/ public long getExpiryDate() { return expiryDate_; } /** * required uint64 expiryDate = 2; * *
       * Expiry time in milliseconds
       * 
*/ public Builder setExpiryDate(long value) { bitField0_ |= 0x00000002; expiryDate_ = value; onChanged(); return this; } /** * required uint64 expiryDate = 2; * *
       * Expiry time in milliseconds
       * 
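        *
        * Editor's note: the clear method below resets the value to 0L and
        * clears the presence bit, so hasExpiryDate() returns false until
        * the field is set again.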
*/ public Builder clearExpiryDate() { bitField0_ = (bitField0_ & ~0x00000002); expiryDate_ = 0L; onChanged(); return this; } // optional bytes keyBytes = 3; private com.google.protobuf.ByteString keyBytes_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes keyBytes = 3; * *
       * Key secret
       * 
*/ public boolean hasKeyBytes() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes keyBytes = 3; * *
       * Key secret
       * 
*/ public com.google.protobuf.ByteString getKeyBytes() { return keyBytes_; } /** * optional bytes keyBytes = 3; * *
       * Key secret
       * 
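        *
        * Editor's note: the setter below rejects null with a
        * NullPointerException; pass com.google.protobuf.ByteString.EMPTY
        * to store an empty secret instead.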
*/ public Builder setKeyBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; keyBytes_ = value; onChanged(); return this; } /** * optional bytes keyBytes = 3; * *
       * Key secret
       * 
*/ public Builder clearKeyBytes() { bitField0_ = (bitField0_ & ~0x00000004); keyBytes_ = getDefaultInstance().getKeyBytes(); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockKeyProto) } static { defaultInstance = new BlockKeyProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockKeyProto) } public interface ExportedBlockKeysProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required bool isBlockTokenEnabled = 1; /** * required bool isBlockTokenEnabled = 1; */ boolean hasIsBlockTokenEnabled(); /** * required bool isBlockTokenEnabled = 1; */ boolean getIsBlockTokenEnabled(); // required uint64 keyUpdateInterval = 2; /** * required uint64 keyUpdateInterval = 2; */ boolean hasKeyUpdateInterval(); /** * required uint64 keyUpdateInterval = 2; */ long getKeyUpdateInterval(); // required uint64 tokenLifeTime = 3; /** * required uint64 tokenLifeTime = 3; */ boolean hasTokenLifeTime(); /** * required uint64 tokenLifeTime = 3; */ long getTokenLifeTime(); // required .hadoop.hdfs.BlockKeyProto currentKey = 4; /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ boolean hasCurrentKey(); /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey(); /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder(); // repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ java.util.List getAllKeysList(); /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index); /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ int getAllKeysCount(); /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ java.util.List getAllKeysOrBuilderList(); /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.ExportedBlockKeysProto} * *
   **
   * Current key and set of block keys at the namenode.
   * 
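    *
    * A minimal construction sketch (editor's addition; the interval and
    * lifetime values are illustrative placeholders):
    *
    *   BlockKeyProto current = BlockKeyProto.newBuilder()
    *       .setKeyId(1).setExpiryDate(0L).build();
    *   ExportedBlockKeysProto keys = ExportedBlockKeysProto.newBuilder()
    *       .setIsBlockTokenEnabled(true)   // required
    *       .setKeyUpdateInterval(600000L)  // required, uint64
    *       .setTokenLifeTime(600000L)      // required, uint64
    *       .setCurrentKey(current)         // required nested message
    *       .addAllKeys(current)            // repeated field
    *       .build();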
*/ public static final class ExportedBlockKeysProto extends com.google.protobuf.GeneratedMessage implements ExportedBlockKeysProtoOrBuilder { // Use ExportedBlockKeysProto.newBuilder() to construct. private ExportedBlockKeysProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ExportedBlockKeysProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ExportedBlockKeysProto defaultInstance; public static ExportedBlockKeysProto getDefaultInstance() { return defaultInstance; } public ExportedBlockKeysProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ExportedBlockKeysProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; isBlockTokenEnabled_ = input.readBool(); break; } case 16: { bitField0_ |= 0x00000002; keyUpdateInterval_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; tokenLifeTime_ = input.readUInt64(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = currentKey_.toBuilder(); } currentKey_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(currentKey_); currentKey_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } case 42: { if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { allKeys_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000010; } allKeys_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { allKeys_ = java.util.Collections.unmodifiableList(allKeys_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder.class); } public static 
com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public ExportedBlockKeysProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ExportedBlockKeysProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool isBlockTokenEnabled = 1; public static final int ISBLOCKTOKENENABLED_FIELD_NUMBER = 1; private boolean isBlockTokenEnabled_; /** * required bool isBlockTokenEnabled = 1; */ public boolean hasIsBlockTokenEnabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool isBlockTokenEnabled = 1; */ public boolean getIsBlockTokenEnabled() { return isBlockTokenEnabled_; } // required uint64 keyUpdateInterval = 2; public static final int KEYUPDATEINTERVAL_FIELD_NUMBER = 2; private long keyUpdateInterval_; /** * required uint64 keyUpdateInterval = 2; */ public boolean hasKeyUpdateInterval() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 keyUpdateInterval = 2; */ public long getKeyUpdateInterval() { return keyUpdateInterval_; } // required uint64 tokenLifeTime = 3; public static final int TOKENLIFETIME_FIELD_NUMBER = 3; private long tokenLifeTime_; /** * required uint64 tokenLifeTime = 3; */ public boolean hasTokenLifeTime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 tokenLifeTime = 3; */ public long getTokenLifeTime() { return tokenLifeTime_; } // required .hadoop.hdfs.BlockKeyProto currentKey = 4; public static final int CURRENTKEY_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto currentKey_; /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public boolean hasCurrentKey() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey() { return currentKey_; } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() { return currentKey_; } // repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; public static final int ALLKEYS_FIELD_NUMBER = 5; private java.util.List allKeys_; /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public java.util.List getAllKeysList() { return allKeys_; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public java.util.List getAllKeysOrBuilderList() { return allKeys_; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public int getAllKeysCount() { return allKeys_.size(); } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index) { return allKeys_.get(index); } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( int index) { return allKeys_.get(index); } private void initFields() { isBlockTokenEnabled_ = false; keyUpdateInterval_ = 0L; tokenLifeTime_ = 0L; currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); allKeys_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = 
memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasIsBlockTokenEnabled()) { memoizedIsInitialized = 0; return false; } if (!hasKeyUpdateInterval()) { memoizedIsInitialized = 0; return false; } if (!hasTokenLifeTime()) { memoizedIsInitialized = 0; return false; } if (!hasCurrentKey()) { memoizedIsInitialized = 0; return false; } if (!getCurrentKey().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getAllKeysCount(); i++) { if (!getAllKeys(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, isBlockTokenEnabled_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, keyUpdateInterval_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, tokenLifeTime_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, currentKey_); } for (int i = 0; i < allKeys_.size(); i++) { output.writeMessage(5, allKeys_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(1, isBlockTokenEnabled_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, keyUpdateInterval_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, tokenLifeTime_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, currentKey_); } for (int i = 0; i < allKeys_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, allKeys_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) obj; boolean result = true; result = result && (hasIsBlockTokenEnabled() == other.hasIsBlockTokenEnabled()); if (hasIsBlockTokenEnabled()) { result = result && (getIsBlockTokenEnabled() == other.getIsBlockTokenEnabled()); } result = result && (hasKeyUpdateInterval() == other.hasKeyUpdateInterval()); if (hasKeyUpdateInterval()) { result = result && (getKeyUpdateInterval() == other.getKeyUpdateInterval()); } result = result && (hasTokenLifeTime() == other.hasTokenLifeTime()); if (hasTokenLifeTime()) { result = result && (getTokenLifeTime() == other.getTokenLifeTime()); } result = result && (hasCurrentKey() == other.hasCurrentKey()); if (hasCurrentKey()) { result = result && getCurrentKey() .equals(other.getCurrentKey()); } result = result && getAllKeysList() .equals(other.getAllKeysList()); result = result && 
getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasIsBlockTokenEnabled()) { hash = (37 * hash) + ISBLOCKTOKENENABLED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getIsBlockTokenEnabled()); } if (hasKeyUpdateInterval()) { hash = (37 * hash) + KEYUPDATEINTERVAL_FIELD_NUMBER; hash = (53 * hash) + hashLong(getKeyUpdateInterval()); } if (hasTokenLifeTime()) { hash = (37 * hash) + TOKENLIFETIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTokenLifeTime()); } if (hasCurrentKey()) { hash = (37 * hash) + CURRENTKEY_FIELD_NUMBER; hash = (53 * hash) + getCurrentKey().hashCode(); } if (getAllKeysCount() > 0) { hash = (37 * hash) + ALLKEYS_FIELD_NUMBER; hash = (53 * hash) + getAllKeysList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public 
Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ExportedBlockKeysProto} * *
     **
     * Current key and set of block keys at the namenode.
     * 
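      *
      * Editor's note on merge semantics, as implemented by mergeFrom()
      * below: scalar fields that are set on the other message overwrite
      * this builder's values, currentKey is merged field-by-field, and the
      * repeated allKeys entries are appended rather than replaced. A
      * hedged sketch, assuming existing messages "base" and "update":
      *
      *   ExportedBlockKeysProto merged = ExportedBlockKeysProto
      *       .newBuilder(base)
      *       .mergeFrom(update)  // appends update.allKeys after base.allKeys
      *       .build();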
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getCurrentKeyFieldBuilder(); getAllKeysFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); isBlockTokenEnabled_ = false; bitField0_ = (bitField0_ & ~0x00000001); keyUpdateInterval_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); tokenLifeTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); if (currentKeyBuilder_ == null) { currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); } else { currentKeyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); if (allKeysBuilder_ == null) { allKeys_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000010); } else { allKeysBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.isBlockTokenEnabled_ = isBlockTokenEnabled_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.keyUpdateInterval_ = keyUpdateInterval_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.tokenLifeTime_ = tokenLifeTime_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (currentKeyBuilder_ == null) { 
result.currentKey_ = currentKey_; } else { result.currentKey_ = currentKeyBuilder_.build(); } if (allKeysBuilder_ == null) { if (((bitField0_ & 0x00000010) == 0x00000010)) { allKeys_ = java.util.Collections.unmodifiableList(allKeys_); bitField0_ = (bitField0_ & ~0x00000010); } result.allKeys_ = allKeys_; } else { result.allKeys_ = allKeysBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) return this; if (other.hasIsBlockTokenEnabled()) { setIsBlockTokenEnabled(other.getIsBlockTokenEnabled()); } if (other.hasKeyUpdateInterval()) { setKeyUpdateInterval(other.getKeyUpdateInterval()); } if (other.hasTokenLifeTime()) { setTokenLifeTime(other.getTokenLifeTime()); } if (other.hasCurrentKey()) { mergeCurrentKey(other.getCurrentKey()); } if (allKeysBuilder_ == null) { if (!other.allKeys_.isEmpty()) { if (allKeys_.isEmpty()) { allKeys_ = other.allKeys_; bitField0_ = (bitField0_ & ~0x00000010); } else { ensureAllKeysIsMutable(); allKeys_.addAll(other.allKeys_); } onChanged(); } } else { if (!other.allKeys_.isEmpty()) { if (allKeysBuilder_.isEmpty()) { allKeysBuilder_.dispose(); allKeysBuilder_ = null; allKeys_ = other.allKeys_; bitField0_ = (bitField0_ & ~0x00000010); allKeysBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getAllKeysFieldBuilder() : null; } else { allKeysBuilder_.addAllMessages(other.allKeys_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasIsBlockTokenEnabled()) { return false; } if (!hasKeyUpdateInterval()) { return false; } if (!hasTokenLifeTime()) { return false; } if (!hasCurrentKey()) { return false; } if (!getCurrentKey().isInitialized()) { return false; } for (int i = 0; i < getAllKeysCount(); i++) { if (!getAllKeys(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool isBlockTokenEnabled = 1; private boolean isBlockTokenEnabled_ ; /** * required bool isBlockTokenEnabled = 1; */ public boolean hasIsBlockTokenEnabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool isBlockTokenEnabled = 1; */ public boolean getIsBlockTokenEnabled() { return isBlockTokenEnabled_; } /** * required bool isBlockTokenEnabled = 1; */ public Builder setIsBlockTokenEnabled(boolean value) { bitField0_ |= 0x00000001; isBlockTokenEnabled_ = value; onChanged(); return this; } /** * required bool isBlockTokenEnabled = 1; */ public Builder clearIsBlockTokenEnabled() { bitField0_ = (bitField0_ & ~0x00000001); isBlockTokenEnabled_ = false; onChanged(); return this; } // required uint64 keyUpdateInterval = 2; private long keyUpdateInterval_ ; /** * required uint64 keyUpdateInterval = 2; */ public boolean hasKeyUpdateInterval() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 keyUpdateInterval = 2; */ public long getKeyUpdateInterval() { return keyUpdateInterval_; } /** * required uint64 keyUpdateInterval = 2; */ public Builder setKeyUpdateInterval(long value) { bitField0_ |= 0x00000002; keyUpdateInterval_ = value; onChanged(); return this; } /** * required uint64 keyUpdateInterval = 2; */ public Builder clearKeyUpdateInterval() { bitField0_ = (bitField0_ & ~0x00000002); keyUpdateInterval_ = 0L; onChanged(); return this; } // required uint64 tokenLifeTime = 3; private long tokenLifeTime_ ; /** * required uint64 tokenLifeTime = 3; */ public boolean hasTokenLifeTime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 tokenLifeTime = 3; */ public long getTokenLifeTime() { return tokenLifeTime_; } /** * required uint64 tokenLifeTime = 3; */ public Builder setTokenLifeTime(long value) { bitField0_ |= 0x00000004; tokenLifeTime_ = value; onChanged(); return this; } /** * required uint64 tokenLifeTime = 3; */ public Builder clearTokenLifeTime() { bitField0_ = (bitField0_ & ~0x00000004); tokenLifeTime_ = 0L; onChanged(); return this; } // required .hadoop.hdfs.BlockKeyProto currentKey = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> currentKeyBuilder_; /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public boolean hasCurrentKey() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey() { if (currentKeyBuilder_ == null) { return currentKey_; } else { return currentKeyBuilder_.getMessage(); } } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public Builder setCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { if (currentKeyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } currentKey_ = value; onChanged(); } else { currentKeyBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public Builder setCurrentKey( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { if (currentKeyBuilder_ == null) { currentKey_ = builderForValue.build(); onChanged(); } else { currentKeyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public Builder mergeCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { if (currentKeyBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && currentKey_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()) { currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(currentKey_).mergeFrom(value).buildPartial(); } else { currentKey_ = value; } onChanged(); } else { currentKeyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public Builder clearCurrentKey() { if (currentKeyBuilder_ == null) { currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); onChanged(); } else { currentKeyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder getCurrentKeyBuilder() { bitField0_ |= 0x00000008; onChanged(); return getCurrentKeyFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() { if (currentKeyBuilder_ != null) { return currentKeyBuilder_.getMessageOrBuilder(); } else { return currentKey_; } } /** * required .hadoop.hdfs.BlockKeyProto currentKey = 4; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> getCurrentKeyFieldBuilder() { if (currentKeyBuilder_ == null) { currentKeyBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder>( currentKey_, 
getParentForChildren(), isClean()); currentKey_ = null; } return currentKeyBuilder_; } // repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; private java.util.List allKeys_ = java.util.Collections.emptyList(); private void ensureAllKeysIsMutable() { if (!((bitField0_ & 0x00000010) == 0x00000010)) { allKeys_ = new java.util.ArrayList(allKeys_); bitField0_ |= 0x00000010; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> allKeysBuilder_; /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public java.util.List getAllKeysList() { if (allKeysBuilder_ == null) { return java.util.Collections.unmodifiableList(allKeys_); } else { return allKeysBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public int getAllKeysCount() { if (allKeysBuilder_ == null) { return allKeys_.size(); } else { return allKeysBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index) { if (allKeysBuilder_ == null) { return allKeys_.get(index); } else { return allKeysBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder setAllKeys( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { if (allKeysBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureAllKeysIsMutable(); allKeys_.set(index, value); onChanged(); } else { allKeysBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder setAllKeys( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { if (allKeysBuilder_ == null) { ensureAllKeysIsMutable(); allKeys_.set(index, builderForValue.build()); onChanged(); } else { allKeysBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder addAllKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { if (allKeysBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureAllKeysIsMutable(); allKeys_.add(value); onChanged(); } else { allKeysBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder addAllKeys( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { if (allKeysBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureAllKeysIsMutable(); allKeys_.add(index, value); onChanged(); } else { allKeysBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder addAllKeys( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { if (allKeysBuilder_ == null) { ensureAllKeysIsMutable(); allKeys_.add(builderForValue.build()); onChanged(); } else { allKeysBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder addAllKeys( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { if (allKeysBuilder_ == null) { ensureAllKeysIsMutable(); allKeys_.add(index, builderForValue.build()); onChanged(); } 
else { allKeysBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder addAllAllKeys( java.lang.Iterable values) { if (allKeysBuilder_ == null) { ensureAllKeysIsMutable(); super.addAll(values, allKeys_); onChanged(); } else { allKeysBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder clearAllKeys() { if (allKeysBuilder_ == null) { allKeys_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000010); onChanged(); } else { allKeysBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public Builder removeAllKeys(int index) { if (allKeysBuilder_ == null) { ensureAllKeysIsMutable(); allKeys_.remove(index); onChanged(); } else { allKeysBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder getAllKeysBuilder( int index) { return getAllKeysFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( int index) { if (allKeysBuilder_ == null) { return allKeys_.get(index); } else { return allKeysBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public java.util.List getAllKeysOrBuilderList() { if (allKeysBuilder_ != null) { return allKeysBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(allKeys_); } } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder addAllKeysBuilder() { return getAllKeysFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder addAllKeysBuilder( int index) { return getAllKeysFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockKeyProto allKeys = 5; */ public java.util.List getAllKeysBuilderList() { return getAllKeysFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> getAllKeysFieldBuilder() { if (allKeysBuilder_ == null) { allKeysBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder>( allKeys_, ((bitField0_ & 0x00000010) == 0x00000010), getParentForChildren(), isClean()); allKeys_ = null; } return allKeysBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExportedBlockKeysProto) } static { defaultInstance = new ExportedBlockKeysProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ExportedBlockKeysProto) } public interface RecoveringBlockProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 newGenStamp = 1; /** * required uint64 
newGenStamp = 1; * *
      * New generation stamp (genstamp) post recovery
     * 
*/ boolean hasNewGenStamp(); /** * required uint64 newGenStamp = 1; * *
      * New generation stamp (genstamp) post recovery
     * 
*/ long getNewGenStamp(); // required .hadoop.hdfs.LocatedBlockProto block = 2; /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
     * Block to be recovered
     * 
*/ boolean hasBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
     * Block to be recovered
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
     * Block to be recovered
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); // optional .hadoop.hdfs.BlockProto truncateBlock = 3; /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
     * New block for recovery (truncate)
     * 
*/ boolean hasTruncateBlock(); /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
     * New block for recovery (truncate)
     * 
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getTruncateBlock(); /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
     * New block for recovery (truncate)
     * 
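      *
      * Editor's note: truncateBlock is the only optional field of this
      * message; per the comment above it carries the replacement block for
      * truncate-based recovery, so callers should check hasTruncateBlock()
      * before reading it.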
*/ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getTruncateBlockOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.RecoveringBlockProto} * *
   **
    * Block that needs to be recovered at a given location
   * 
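    *
    * A minimal parsing sketch (editor's addition; "in" is assumed to be a
    * java.io.InputStream positioned at a serialized RecoveringBlockProto):
    *
    *   RecoveringBlockProto rb = RecoveringBlockProto.parseFrom(in);
    *   long newGenStamp = rb.getNewGenStamp();   // required
    *   LocatedBlockProto block = rb.getBlock();  // required
    *   if (rb.hasTruncateBlock()) {              // optional
    *     BlockProto truncate = rb.getTruncateBlock();
    *   }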
*/ public static final class RecoveringBlockProto extends com.google.protobuf.GeneratedMessage implements RecoveringBlockProtoOrBuilder { // Use RecoveringBlockProto.newBuilder() to construct. private RecoveringBlockProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RecoveringBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RecoveringBlockProto defaultInstance; public static RecoveringBlockProto getDefaultInstance() { return defaultInstance; } public RecoveringBlockProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RecoveringBlockProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; newGenStamp_ = input.readUInt64(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = block_.toBuilder(); } block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = truncateBlock_.toBuilder(); } truncateBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(truncateBlock_); truncateBlock_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public RecoveringBlockProto parsePartialFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RecoveringBlockProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 newGenStamp = 1; public static final int NEWGENSTAMP_FIELD_NUMBER = 1; private long newGenStamp_; /** * required uint64 newGenStamp = 1; * *
      * New generation stamp (genstamp) post recovery
     * 
*/ public boolean hasNewGenStamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 newGenStamp = 1; * *
      * New generation stamp (genstamp) post recovery
     * 
*/ public long getNewGenStamp() { return newGenStamp_; } // required .hadoop.hdfs.LocatedBlockProto block = 2; public static final int BLOCK_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
     * Block to be recovered
     * 
*/ public boolean hasBlock() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
     * Block to be recovered
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { return block_; } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
     * Block to be recovered
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { return block_; } // optional .hadoop.hdfs.BlockProto truncateBlock = 3; public static final int TRUNCATEBLOCK_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto truncateBlock_; /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
     * New block for recovery (truncate)
     * 
*/ public boolean hasTruncateBlock() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
     * New block for recovery (truncate)
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getTruncateBlock() { return truncateBlock_; } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
     * New block for recovery (truncate)
     * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getTruncateBlockOrBuilder() { return truncateBlock_; } private void initFields() { newGenStamp_ = 0L; block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasNewGenStamp()) { memoizedIsInitialized = 0; return false; } if (!hasBlock()) { memoizedIsInitialized = 0; return false; } if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasTruncateBlock()) { if (!getTruncateBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, newGenStamp_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, block_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, truncateBlock_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, newGenStamp_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, block_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, truncateBlock_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) obj; boolean result = true; result = result && (hasNewGenStamp() == other.hasNewGenStamp()); if (hasNewGenStamp()) { result = result && (getNewGenStamp() == other.getNewGenStamp()); } result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && (hasTruncateBlock() == other.hasTruncateBlock()); if (hasTruncateBlock()) { result = result && getTruncateBlock() .equals(other.getTruncateBlock()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasNewGenStamp()) { hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNewGenStamp()); } if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } if 
(hasTruncateBlock()) { hash = (37 * hash) + TRUNCATEBLOCK_FIELD_NUMBER; hash = (53 * hash) + getTruncateBlock().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RecoveringBlockProto} * *
     **
      * Block that needs to be recovered at a given location
     * 
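      *
      * Illustrative builder sketch (editor's example, not part of the generated
      * file; "locatedBlock" stands in for a previously obtained LocatedBlockProto):
      *
      *   RecoveringBlockProto rb = RecoveringBlockProto.newBuilder()
      *       .setNewGenStamp(1001L)    // required: new generation stamp post recovery
      *       .setBlock(locatedBlock)   // required: block under recovery
      *       .build();                 // truncateBlock is optional and may be omitted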
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); getTruncateBlockFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); newGenStamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); if (truncateBlockBuilder_ == null) { truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); } else { truncateBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.newGenStamp_ = newGenStamp_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (truncateBlockBuilder_ == null) { result.truncateBlock_ = truncateBlock_; } else { result.truncateBlock_ = truncateBlockBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()) return this; if (other.hasNewGenStamp()) { setNewGenStamp(other.getNewGenStamp()); } if (other.hasBlock()) { mergeBlock(other.getBlock()); } if (other.hasTruncateBlock()) { mergeTruncateBlock(other.getTruncateBlock()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasNewGenStamp()) { return false; } if (!hasBlock()) { return false; } if (!getBlock().isInitialized()) { return false; } if (hasTruncateBlock()) { if (!getTruncateBlock().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 newGenStamp = 1; private long newGenStamp_ ; /** * required uint64 newGenStamp = 1; * *
       * New genstamp post recovery
       * 
*/ public boolean hasNewGenStamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 newGenStamp = 1; * *
       * New genstamp post recovery
       * 
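       *
       * Editor's note (illustrative): guard reads with the has-method when the
       * message may be only partially built, e.g.
       *
       *   long gs = rb.hasNewGenStamp() ? rb.getNewGenStamp() : 0L;  // "rb" is hypothetical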
*/ public long getNewGenStamp() { return newGenStamp_; } /** * required uint64 newGenStamp = 1; * *
       * New genstamp post recovery
       * 
*/ public Builder setNewGenStamp(long value) { bitField0_ |= 0x00000001; newGenStamp_ = value; onChanged(); return this; } /** * required uint64 newGenStamp = 1; * *
       * New genstamp post recovery
       * 
*/ public Builder clearNewGenStamp() { bitField0_ = (bitField0_ & ~0x00000001); newGenStamp_ = 0L; onChanged(); return this; } // required .hadoop.hdfs.LocatedBlockProto block = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public boolean hasBlock() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
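       *
       * Illustrative read-side sketch (editor's example; "proto" stands in for a
       * parsed RecoveringBlockProto):
       *
       *   LocatedBlockProto lb = proto.getBlock();  // required field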
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000002; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * required .hadoop.hdfs.LocatedBlockProto block = 2; * *
       * Block to be recovered
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // optional .hadoop.hdfs.BlockProto truncateBlock = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> truncateBlockBuilder_; /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public boolean hasTruncateBlock() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
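       *
       * Since this field is optional, guard reads with the has-method
       * (editor's illustrative sketch; "proto" is hypothetical):
       *
       *   if (proto.hasTruncateBlock()) {
       *     BlockProto tb = proto.getTruncateBlock();
       *     // recovery truncates the replica to tb.getNumBytes()
       *   }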
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getTruncateBlock() { if (truncateBlockBuilder_ == null) { return truncateBlock_; } else { return truncateBlockBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public Builder setTruncateBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (truncateBlockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } truncateBlock_ = value; onChanged(); } else { truncateBlockBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public Builder setTruncateBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { if (truncateBlockBuilder_ == null) { truncateBlock_ = builderForValue.build(); onChanged(); } else { truncateBlockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public Builder mergeTruncateBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { if (truncateBlockBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && truncateBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(truncateBlock_).mergeFrom(value).buildPartial(); } else { truncateBlock_ = value; } onChanged(); } else { truncateBlockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public Builder clearTruncateBlock() { if (truncateBlockBuilder_ == null) { truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); onChanged(); } else { truncateBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getTruncateBlockBuilder() { bitField0_ |= 0x00000004; onChanged(); return getTruncateBlockFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getTruncateBlockOrBuilder() { if (truncateBlockBuilder_ != null) { return truncateBlockBuilder_.getMessageOrBuilder(); } else { return truncateBlock_; } } /** * optional .hadoop.hdfs.BlockProto truncateBlock = 3; * *
       * New block for recovery (truncate)
       * 
*/ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> getTruncateBlockFieldBuilder() { if (truncateBlockBuilder_ == null) { truncateBlockBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( truncateBlock_, getParentForChildren(), isClean()); truncateBlock_ = null; } return truncateBlockBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RecoveringBlockProto) } static { defaultInstance = new RecoveringBlockProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RecoveringBlockProto) } public interface VersionRequestProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.VersionRequestProto} * *
   **
    * Void request (empty message)
   * 
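    *
    * Illustrative sketch (editor's example): the message carries no fields, so a
    * request is simply
    *
    *   VersionRequestProto req = VersionRequestProto.newBuilder().build();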
*/ public static final class VersionRequestProto extends com.google.protobuf.GeneratedMessage implements VersionRequestProtoOrBuilder { // Use VersionRequestProto.newBuilder() to construct. private VersionRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private VersionRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final VersionRequestProto defaultInstance; public static VersionRequestProto getDefaultInstance() { return defaultInstance; } public VersionRequestProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private VersionRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public VersionRequestProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new VersionRequestProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected 
java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto prototype) { 
return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.VersionRequestProto} * *
     **
      * Void request (empty message)
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionRequestProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { 
mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.VersionRequestProto) } static { defaultInstance = new VersionRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.VersionRequestProto) } public interface VersionResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.NamespaceInfoProto info = 1; /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ boolean hasInfo(); /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo(); /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.VersionResponseProto} * *
   **
   * Version response from namenode.
   * 
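    *
    * Illustrative sketch (editor's example; "bytes" stands in for a serialized
    * response):
    *
    *   VersionResponseProto resp = VersionResponseProto.parseFrom(bytes);
    *   NamespaceInfoProto info = resp.getInfo();  // required field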
*/ public static final class VersionResponseProto extends com.google.protobuf.GeneratedMessage implements VersionResponseProtoOrBuilder { // Use VersionResponseProto.newBuilder() to construct. private VersionResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private VersionResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final VersionResponseProto defaultInstance; public static VersionResponseProto getDefaultInstance() { return defaultInstance; } public VersionResponseProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private VersionResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); } info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public VersionResponseProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new VersionResponseProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.NamespaceInfoProto info = 1; public static final int INFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto info_; /** * required 
.hadoop.hdfs.NamespaceInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo() { return info_; } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() { return info_; } private void initFields() { info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasInfo()) { memoizedIsInitialized = 0; return false; } if (!getInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, info_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) obj; boolean result = true; result = result && (hasInfo() == other.hasInfo()); if (hasInfo()) { result = result && getInfo() .equals(other.getInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.VersionResponseProto} * *
     **
     * Version response from namenode.
     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionResponseProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_VersionResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = infoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance()) return this; if (other.hasInfo()) { mergeInfo(other.getInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if 
(!hasInfo()) { return false; } if (!getInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.NamespaceInfoProto info = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> infoBuilder_; /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo() { if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) { if (infoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } info_ = value; onChanged(); } else { infoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public Builder setInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) { if (infoBuilder_ == null) { info_ = builderForValue.build(); onChanged(); } else { infoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) { if (infoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && info_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) { info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); } else { info_ = value; } onChanged(); } else { infoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public Builder clearInfo() { if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); onChanged(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() { if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; } } /** * required .hadoop.hdfs.NamespaceInfoProto info = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> getInfoFieldBuilder() { if (infoBuilder_ == null) { infoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>( info_, getParentForChildren(), isClean()); info_ = null; } return infoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.VersionResponseProto) } static { defaultInstance = new VersionResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.VersionResponseProto) } public interface SnapshotInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string snapshotName = 1; /** * required string snapshotName = 1; */ boolean hasSnapshotName(); /** * required string snapshotName = 1; */ java.lang.String getSnapshotName(); /** * required string snapshotName = 1; */ com.google.protobuf.ByteString getSnapshotNameBytes(); // required string snapshotRoot = 2; /** * required string snapshotRoot = 2; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 2; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 2; */ com.google.protobuf.ByteString getSnapshotRootBytes(); // required .hadoop.hdfs.FsPermissionProto permission = 3; /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ boolean hasPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); // required string owner = 4; /** * required string owner = 4; */ boolean hasOwner(); /** * required string owner = 4; */ java.lang.String getOwner(); /** * required string owner = 4; */ com.google.protobuf.ByteString getOwnerBytes(); // required string group = 5; /** * required string group = 5; */ boolean hasGroup(); /** * required string group = 5; */ java.lang.String getGroup(); /** * required string group = 5; */ com.google.protobuf.ByteString getGroupBytes(); // required string createTime = 6; /** * required string createTime = 6; * *
     * TODO: do we need access time?
     * 
*/ boolean hasCreateTime(); /** * required string createTime = 6; * *
     * TODO: do we need access time?
     * 
*/ java.lang.String getCreateTime(); /** * required string createTime = 6; * *
     * TODO: do we need access time?
     * 
*/ com.google.protobuf.ByteString getCreateTimeBytes(); } /** * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto} * *
   **
   * Information related to a snapshot
   * TODO: add more information
   * 
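    *
    * Illustrative builder sketch (editor's example; "perm" stands in for an
    * FsPermissionProto instance, and the standard generated setters are
    * assumed). All six fields are required:
    *
    *   SnapshotInfoProto si = SnapshotInfoProto.newBuilder()
    *       .setSnapshotName("s1")
    *       .setSnapshotRoot("/data")
    *       .setPermission(perm)
    *       .setOwner("hdfs")
    *       .setGroup("supergroup")
    *       .setCreateTime("2015-01-01T00:00:00")
    *       .build();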
*/ public static final class SnapshotInfoProto extends com.google.protobuf.GeneratedMessage implements SnapshotInfoProtoOrBuilder { // Use SnapshotInfoProto.newBuilder() to construct. private SnapshotInfoProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SnapshotInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SnapshotInfoProto defaultInstance; public static SnapshotInfoProto getDefaultInstance() { return defaultInstance; } public SnapshotInfoProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SnapshotInfoProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotName_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; snapshotRoot_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = permission_.toBuilder(); } permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(permission_); permission_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 34: { bitField0_ |= 0x00000008; owner_ = input.readBytes(); break; } case 42: { bitField0_ |= 0x00000010; group_ = input.readBytes(); break; } case 50: { bitField0_ |= 0x00000020; createTime_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public SnapshotInfoProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new SnapshotInfoProto(input, 
extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotName = 1; public static final int SNAPSHOTNAME_FIELD_NUMBER = 1; private java.lang.Object snapshotName_; /** * required string snapshotName = 1; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotName = 1; */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ = s; } return s; } } /** * required string snapshotName = 1; */ public com.google.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string snapshotRoot = 2; public static final int SNAPSHOTROOT_FIELD_NUMBER = 2; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 2; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string snapshotRoot = 2; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 2; */ public com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.FsPermissionProto permission = 3; public static final int PERMISSION_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_; /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public boolean hasPermission() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { return permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_; } // required string owner = 4; public static final int OWNER_FIELD_NUMBER = 4; private java.lang.Object owner_; /** * required string owner = 4; */ public boolean hasOwner() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required string owner = 4; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { owner_ = s; } return s; } } /** * required string owner = 4; */ public com.google.protobuf.ByteString 
getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string group = 5; public static final int GROUP_FIELD_NUMBER = 5; private java.lang.Object group_; /** * required string group = 5; */ public boolean hasGroup() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string group = 5; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { group_ = s; } return s; } } /** * required string group = 5; */ public com.google.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // required string createTime = 6; public static final int CREATETIME_FIELD_NUMBER = 6; private java.lang.Object createTime_; /** * required string createTime = 6; * *
     * TODO: do we need access time?
     * 
*/ public boolean hasCreateTime() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string createTime = 6; * *
     * TODO: do we need access time?
     * 
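      *
      * Editor's note (illustrative; "si" is hypothetical): the timestamp is
      * stored as a string, e.g.
      *
      *   String created = si.hasCreateTime() ? si.getCreateTime() : "";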
*/ public java.lang.String getCreateTime() { java.lang.Object ref = createTime_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { createTime_ = s; } return s; } } /** * required string createTime = 6; * *
     * TODO: do we need access time?
     * 
*/ public com.google.protobuf.ByteString getCreateTimeBytes() { java.lang.Object ref = createTime_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); createTime_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotName_ = ""; snapshotRoot_ = ""; permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); owner_ = ""; group_ = ""; createTime_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotName()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!hasOwner()) { memoizedIsInitialized = 0; return false; } if (!hasGroup()) { memoizedIsInitialized = 0; return false; } if (!hasCreateTime()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, permission_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getOwnerBytes()); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBytes(5, getGroupBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(6, getCreateTimeBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, permission_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, getOwnerBytes()); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(5, getGroupBytes()); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(6, getCreateTimeBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other = 
(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) obj; boolean result = true; result = result && (hasSnapshotName() == other.hasSnapshotName()); if (hasSnapshotName()) { result = result && getSnapshotName() .equals(other.getSnapshotName()); } result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasPermission() == other.hasPermission()); if (hasPermission()) { result = result && getPermission() .equals(other.getPermission()); } result = result && (hasOwner() == other.hasOwner()); if (hasOwner()) { result = result && getOwner() .equals(other.getOwner()); } result = result && (hasGroup() == other.hasGroup()); if (hasGroup()) { result = result && getGroup() .equals(other.getGroup()); } result = result && (hasCreateTime() == other.hasCreateTime()); if (hasCreateTime()) { result = result && getCreateTime() .equals(other.getCreateTime()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotName()) { hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotName().hashCode(); } if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } if (hasOwner()) { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } if (hasGroup()) { hash = (37 * hash) + GROUP_FIELD_NUMBER; hash = (53 * hash) + getGroup().hashCode(); } if (hasCreateTime()) { hash = (37 * hash) + CREATETIME_FIELD_NUMBER; hash = (53 * hash) + getCreateTime().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto} * *
     **
     * Information related to a snapshot
     * TODO: add more information
     * 
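     *
     * A minimal usage sketch (illustrative only; every field value below is
     * made up):
     *
     *   HdfsProtos.SnapshotInfoProto info = HdfsProtos.SnapshotInfoProto.newBuilder()
     *       .setSnapshotName("s1")                       // required
     *       .setSnapshotRoot("/user/alice/dir")          // required
     *       .setPermission(HdfsProtos.FsPermissionProto.newBuilder()
     *           .setPerm(0755).build())                  // required message field
     *       .setOwner("alice")                           // required
     *       .setGroup("supergroup")                      // required
     *       .setCreateTime("2015-06-01 12:00")           // required
     *       .build();   // throws UninitializedMessageException if a required field is unset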
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getPermissionFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (permissionBuilder_ == null) { permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); owner_ = ""; bitField0_ = (bitField0_ & ~0x00000008); group_ = ""; bitField0_ = (bitField0_ & ~0x00000010); createTime_ = ""; bitField0_ = (bitField0_ & ~0x00000020); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotName_ = snapshotName_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (permissionBuilder_ == null) { result.permission_ = permission_; } else { result.permission_ = permissionBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.owner_ = owner_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.group_ = 
group_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.createTime_ = createTime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance()) return this; if (other.hasSnapshotName()) { bitField0_ |= 0x00000001; snapshotName_ = other.snapshotName_; onChanged(); } if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000002; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasPermission()) { mergePermission(other.getPermission()); } if (other.hasOwner()) { bitField0_ |= 0x00000008; owner_ = other.owner_; onChanged(); } if (other.hasGroup()) { bitField0_ |= 0x00000010; group_ = other.group_; onChanged(); } if (other.hasCreateTime()) { bitField0_ |= 0x00000020; createTime_ = other.createTime_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotName()) { return false; } if (!hasSnapshotRoot()) { return false; } if (!hasPermission()) { return false; } if (!hasOwner()) { return false; } if (!hasGroup()) { return false; } if (!hasCreateTime()) { return false; } if (!getPermission().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotName = 1; private java.lang.Object snapshotName_ = ""; /** * required string snapshotName = 1; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotName = 1; */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotName = 1; */ public com.google.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string snapshotName = 1; */ public Builder setSnapshotName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotName_ = value; onChanged(); return this; } /** * required string snapshotName = 1; */ public Builder clearSnapshotName() { bitField0_ = (bitField0_ & ~0x00000001); 
snapshotName_ = getDefaultInstance().getSnapshotName(); onChanged(); return this; } /** * required string snapshotName = 1; */ public Builder setSnapshotNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotName_ = value; onChanged(); return this; } // required string snapshotRoot = 2; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 2; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string snapshotRoot = 2; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 2; */ public com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 2; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 2; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000002); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 2; */ public Builder setSnapshotRootBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotRoot_ = value; onChanged(); return this; } // required .hadoop.hdfs.FsPermissionProto permission = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public boolean hasPermission() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } permission_ = value; onChanged(); } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); onChanged(); } else { 
permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && permission_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); } else { permission_ = value; } onChanged(); } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public Builder clearPermission() { if (permissionBuilder_ == null) { permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getPermissionBuilder() { bitField0_ |= 0x00000004; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 3; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( permission_, getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } // required string owner = 4; private java.lang.Object owner_ = ""; /** * required string owner = 4; */ public boolean hasOwner() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required string owner = 4; */ public java.lang.String getOwner() { java.lang.Object ref = owner_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); owner_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string owner = 4; */ public com.google.protobuf.ByteString getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string owner = 4; */ public Builder setOwner( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; owner_ = value; onChanged(); return this; } /** * required string owner = 4; */ public Builder clearOwner() { bitField0_ = (bitField0_ & ~0x00000008); owner_ = 
getDefaultInstance().getOwner(); onChanged(); return this; } /** * required string owner = 4; */ public Builder setOwnerBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; owner_ = value; onChanged(); return this; } // required string group = 5; private java.lang.Object group_ = ""; /** * required string group = 5; */ public boolean hasGroup() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required string group = 5; */ public java.lang.String getGroup() { java.lang.Object ref = group_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); group_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string group = 5; */ public com.google.protobuf.ByteString getGroupBytes() { java.lang.Object ref = group_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); group_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string group = 5; */ public Builder setGroup( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; group_ = value; onChanged(); return this; } /** * required string group = 5; */ public Builder clearGroup() { bitField0_ = (bitField0_ & ~0x00000010); group_ = getDefaultInstance().getGroup(); onChanged(); return this; } /** * required string group = 5; */ public Builder setGroupBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; group_ = value; onChanged(); return this; } // required string createTime = 6; private java.lang.Object createTime_ = ""; /** * required string createTime = 6; * *
       * TODO: do we need access time?
       * 
       */
      public boolean hasCreateTime() {
        return ((bitField0_ & 0x00000020) == 0x00000020);
      }
      /**
       * required string createTime = 6;
       *
       *
       * TODO: do we need access time?
       *
       */
      public java.lang.String getCreateTime() {
        java.lang.Object ref = createTime_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          createTime_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string createTime = 6;
       *
       *
       * TODO: do we need access time?
       *
       */
      public com.google.protobuf.ByteString getCreateTimeBytes() {
        java.lang.Object ref = createTime_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          createTime_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * required string createTime = 6;
       *
       *
       * TODO: do we need access time?
       *
       */
      public Builder setCreateTime(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000020;
        createTime_ = value;
        onChanged();
        return this;
      }
      /**
       * required string createTime = 6;
       *
       *
       * TODO: do we need access time?
       *
       */
      public Builder clearCreateTime() {
        bitField0_ = (bitField0_ & ~0x00000020);
        createTime_ = getDefaultInstance().getCreateTime();
        onChanged();
        return this;
      }
      /**
       * required string createTime = 6;
       *
       *
       * TODO: do we need access time?
       *
*/ public Builder setCreateTimeBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; createTime_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotInfoProto) } static { defaultInstance = new SnapshotInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotInfoProto) } public interface RollingUpgradeStatusProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string blockPoolId = 1; /** * required string blockPoolId = 1; */ boolean hasBlockPoolId(); /** * required string blockPoolId = 1; */ java.lang.String getBlockPoolId(); /** * required string blockPoolId = 1; */ com.google.protobuf.ByteString getBlockPoolIdBytes(); // optional bool finalized = 2 [default = false]; /** * optional bool finalized = 2 [default = false]; */ boolean hasFinalized(); /** * optional bool finalized = 2 [default = false]; */ boolean getFinalized(); } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto} * *
   **
   * Rolling upgrade status
   * 
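   *
   * A minimal round-trip sketch (illustrative; the block pool id below is a
   * made-up value):
   *
   *   HdfsProtos.RollingUpgradeStatusProto status =
   *       HdfsProtos.RollingUpgradeStatusProto.newBuilder()
   *           .setBlockPoolId("BP-1-127.0.0.1-1400000000000") // required
   *           .setFinalized(false)                            // optional, defaults to false
   *           .build();
   *   byte[] wire = status.toByteArray();
   *   HdfsProtos.RollingUpgradeStatusProto parsed =
   *       HdfsProtos.RollingUpgradeStatusProto.parseFrom(wire);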
*/ public static final class RollingUpgradeStatusProto extends com.google.protobuf.GeneratedMessage implements RollingUpgradeStatusProtoOrBuilder { // Use RollingUpgradeStatusProto.newBuilder() to construct. private RollingUpgradeStatusProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RollingUpgradeStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RollingUpgradeStatusProto defaultInstance; public static RollingUpgradeStatusProto getDefaultInstance() { return defaultInstance; } public RollingUpgradeStatusProto getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollingUpgradeStatusProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; blockPoolId_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; finalized_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class); } public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public RollingUpgradeStatusProto parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RollingUpgradeStatusProto(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string blockPoolId = 1; public static final int BLOCKPOOLID_FIELD_NUMBER = 1; private java.lang.Object blockPoolId_; /** * required string blockPoolId = 1; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string blockPoolId = 1; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { return 
(java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { blockPoolId_ = s; } return s; } } /** * required string blockPoolId = 1; */ public com.google.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } // optional bool finalized = 2 [default = false]; public static final int FINALIZED_FIELD_NUMBER = 2; private boolean finalized_; /** * optional bool finalized = 2 [default = false]; */ public boolean hasFinalized() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool finalized = 2 [default = false]; */ public boolean getFinalized() { return finalized_; } private void initFields() { blockPoolId_ = ""; finalized_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlockPoolId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getBlockPoolIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, finalized_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getBlockPoolIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, finalized_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) obj; boolean result = true; result = result && (hasBlockPoolId() == other.hasBlockPoolId()); if (hasBlockPoolId()) { result = result && getBlockPoolId() .equals(other.getBlockPoolId()); } result = result && (hasFinalized() == other.hasFinalized()); if (hasFinalized()) { result = result && (getFinalized() == other.getFinalized()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlockPoolId()) { hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; hash = (53 * hash) + getBlockPoolId().hashCode(); } if (hasFinalized()) { hash = (37 * hash) + FINALIZED_FIELD_NUMBER; 
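      // Generated hashing pattern: the field number is mixed in (x 37) before
      // the field value (x 53), so equal values stored in different fields
      // still produce different hashes.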
hash = (53 * hash) + hashBoolean(getFinalized()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto} * *
     **
     * Rolling upgrade status
     * 
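     *
     * Builder sketch (illustrative): build() enforces required fields, so
     * isInitialized() can be checked first, while buildPartial() skips the
     * check entirely:
     *
     *   HdfsProtos.RollingUpgradeStatusProto.Builder b =
     *       HdfsProtos.RollingUpgradeStatusProto.newBuilder();
     *   b.setBlockPoolId("BP-1-127.0.0.1-1400000000000");  // hypothetical id
     *   if (b.isInitialized()) {
     *     HdfsProtos.RollingUpgradeStatusProto msg = b.build();
     *   }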
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); blockPoolId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); finalized_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto build() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.blockPoolId_ = blockPoolId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.finalized_ = finalized_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) return this; if (other.hasBlockPoolId()) { bitField0_ |= 0x00000001; blockPoolId_ = other.blockPoolId_; onChanged(); } if 
(other.hasFinalized()) { setFinalized(other.getFinalized()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlockPoolId()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string blockPoolId = 1; private java.lang.Object blockPoolId_ = ""; /** * required string blockPoolId = 1; */ public boolean hasBlockPoolId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string blockPoolId = 1; */ public java.lang.String getBlockPoolId() { java.lang.Object ref = blockPoolId_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); blockPoolId_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string blockPoolId = 1; */ public com.google.protobuf.ByteString getBlockPoolIdBytes() { java.lang.Object ref = blockPoolId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); blockPoolId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * required string blockPoolId = 1; */ public Builder setBlockPoolId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockPoolId_ = value; onChanged(); return this; } /** * required string blockPoolId = 1; */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000001); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /** * required string blockPoolId = 1; */ public Builder setBlockPoolIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; blockPoolId_ = value; onChanged(); return this; } // optional bool finalized = 2 [default = false]; private boolean finalized_ ; /** * optional bool finalized = 2 [default = false]; */ public boolean hasFinalized() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool finalized = 2 [default = false]; */ public boolean getFinalized() { return finalized_; } /** * optional bool finalized = 2 [default = false]; */ public Builder setFinalized(boolean value) { bitField0_ |= 0x00000002; finalized_ = value; onChanged(); return this; } /** * optional bool finalized = 2 [default = false]; */ public Builder clearFinalized() { bitField0_ = (bitField0_ & ~0x00000002); finalized_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeStatusProto) } static { defaultInstance = new RollingUpgradeStatusProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeStatusProto) } private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable 
    internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageReportProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FsPermissionProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FsPermissionProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_BlockKeyProto_descriptor;
  private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_VersionRequestProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_VersionResponseProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\nhdfs.proto\022\013hadoop.hdfs\032\016Security.prot" + "o\"c\n\022ExtendedBlockProto\022\016\n\006poolId\030\001 \002(\t\022" + "\017\n\007blockId\030\002 \002(\004\022\027\n\017generationStamp\030\003 \002(" + "\004\022\023\n\010numBytes\030\004 \001(\004:\0010\"\231\001\n\017DatanodeIDPro" + "to\022\016\n\006ipAddr\030\001 \002(\t\022\020\n\010hostName\030\002 \002(\t\022\024\n\014" + "datanodeUuid\030\003 \002(\t\022\020\n\010xferPort\030\004 \002(\r\022\020\n\010" + "infoPort\030\005 \002(\r\022\017\n\007ipcPort\030\006 \002(\r\022\031\n\016infoS" + "ecurePort\030\007 \001(\r:\0010\"X\n\026DatanodeLocalInfoP" + "roto\022\027\n\017softwareVersion\030\001 \002(\t\022\025\n\rconfigV" + "ersion\030\002 \002(\t\022\016\n\006uptime\030\003 \002(\004\"G\n\022Datanode", "InfosProto\0221\n\tdatanodes\030\001 \003(\0132\036.hadoop.h" + "dfs.DatanodeInfoProto\"\272\003\n\021DatanodeInfoPr" + "oto\022(\n\002id\030\001 \002(\0132\034.hadoop.hdfs.DatanodeID" + "Proto\022\023\n\010capacity\030\002 \001(\004:\0010\022\022\n\007dfsUsed\030\003 " + "\001(\004:\0010\022\024\n\tremaining\030\004 \001(\004:\0010\022\030\n\rblockPoo" + "lUsed\030\005 \001(\004:\0010\022\025\n\nlastUpdate\030\006 \001(\004:\0010\022\027\n" + "\014xceiverCount\030\007 \001(\r:\0010\022\020\n\010location\030\010 \001(\t" + "\022E\n\nadminState\030\n \001(\0162).hadoop.hdfs.Datan" + "odeInfoProto.AdminState:\006NORMAL\022\030\n\rcache" + "Capacity\030\013 \001(\004:\0010\022\024\n\tcacheUsed\030\014 \001(\004:\0010\022", "\036\n\023lastUpdateMonotonic\030\r \001(\004:\0010\"I\n\nAdmin" + "State\022\n\n\006NORMAL\020\000\022\033\n\027DECOMMISSION_INPROG" + "RESS\020\001\022\022\n\016DECOMMISSIONED\020\002\"\336\001\n\024DatanodeS" + "torageProto\022\023\n\013storageUuid\030\001 \002(\t\022E\n\005stat" + "e\030\002 
\001(\0162..hadoop.hdfs.DatanodeStoragePro" + "to.StorageState:\006NORMAL\0228\n\013storageType\030\003" + " \001(\0162\035.hadoop.hdfs.StorageTypeProto:\004DIS" + "K\"0\n\014StorageState\022\n\n\006NORMAL\020\000\022\024\n\020READ_ON" + "LY_SHARED\020\001\"\321\001\n\022StorageReportProto\022\027\n\013st" + "orageUuid\030\001 \002(\tB\002\030\001\022\025\n\006failed\030\002 \001(\010:\005fal", "se\022\023\n\010capacity\030\003 \001(\004:\0010\022\022\n\007dfsUsed\030\004 \001(\004" + ":\0010\022\024\n\tremaining\030\005 \001(\004:\0010\022\030\n\rblockPoolUs" + "ed\030\006 \001(\004:\0010\0222\n\007storage\030\007 \001(\0132!.hadoop.hd" + "fs.DatanodeStorageProto\"\313\001\n\023ContentSumma" + "ryProto\022\016\n\006length\030\001 \002(\004\022\021\n\tfileCount\030\002 \002" + "(\004\022\026\n\016directoryCount\030\003 \002(\004\022\r\n\005quota\030\004 \002(" + "\004\022\025\n\rspaceConsumed\030\005 \002(\004\022\022\n\nspaceQuota\030\006" + " \002(\004\022?\n\016typeQuotaInfos\030\007 \001(\0132\'.hadoop.hd" + "fs.StorageTypeQuotaInfosProto\"[\n\032Storage" + "TypeQuotaInfosProto\022=\n\rtypeQuotaInfo\030\001 \003", "(\0132&.hadoop.hdfs.StorageTypeQuotaInfoPro" + "to\"i\n\031StorageTypeQuotaInfoProto\022+\n\004type\030" + "\001 \002(\0162\035.hadoop.hdfs.StorageTypeProto\022\r\n\005" + "quota\030\002 \002(\004\022\020\n\010consumed\030\003 \002(\004\"7\n\026Corrupt" + "FileBlocksProto\022\r\n\005files\030\001 \003(\t\022\016\n\006cookie" + "\030\002 \002(\t\"!\n\021FsPermissionProto\022\014\n\004perm\030\001 \002(" + "\r\"H\n\021StorageTypesProto\0223\n\014storageTypes\030\001" + " \003(\0162\035.hadoop.hdfs.StorageTypeProto\"\364\001\n\027" + "BlockStoragePolicyProto\022\020\n\010policyId\030\001 \002(" + "\r\022\014\n\004name\030\002 \002(\t\0226\n\016creationPolicy\030\003 \002(\0132", "\036.hadoop.hdfs.StorageTypesProto\022>\n\026creat" + "ionFallbackPolicy\030\004 \001(\0132\036.hadoop.hdfs.St" + "orageTypesProto\022A\n\031replicationFallbackPo" + "licy\030\005 \001(\0132\036.hadoop.hdfs.StorageTypesPro" + "to\")\n\021StorageUuidsProto\022\024\n\014storageUuids\030" + "\001 \003(\t\"\234\002\n\021LocatedBlockProto\022*\n\001b\030\001 \002(\0132\037" + ".hadoop.hdfs.ExtendedBlockProto\022\016\n\006offse" + "t\030\002 \002(\004\022,\n\004locs\030\003 \003(\0132\036.hadoop.hdfs.Data" + "nodeInfoProto\022\017\n\007corrupt\030\004 \002(\010\022-\n\nblockT" + "oken\030\005 \002(\0132\031.hadoop.common.TokenProto\022\024\n", "\010isCached\030\006 \003(\010B\002\020\001\0223\n\014storageTypes\030\007 \003(" + "\0162\035.hadoop.hdfs.StorageTypeProto\022\022\n\nstor" + "ageIDs\030\010 \003(\t\"\223\001\n\026DataEncryptionKeyProto\022" + "\r\n\005keyId\030\001 \002(\r\022\023\n\013blockPoolId\030\002 \002(\t\022\r\n\005n" + "once\030\003 \002(\014\022\025\n\rencryptionKey\030\004 \002(\014\022\022\n\nexp" + "iryDate\030\005 \002(\004\022\033\n\023encryptionAlgorithm\030\006 \001" + "(\t\"\323\001\n\027FileEncryptionInfoProto\022,\n\005suite\030" + "\001 \002(\0162\035.hadoop.hdfs.CipherSuiteProto\022F\n\025" + "cryptoProtocolVersion\030\002 \002(\0162\'.hadoop.hdf" + "s.CryptoProtocolVersionProto\022\013\n\003key\030\003 \002(", "\014\022\n\n\002iv\030\004 \002(\014\022\017\n\007keyName\030\005 \002(\t\022\030\n\020ezKeyV" + "ersionName\030\006 \002(\t\"O\n\032PerFileEncryptionInf" + "oProto\022\013\n\003key\030\001 \002(\014\022\n\n\002iv\030\002 \002(\014\022\030\n\020ezKey" + "VersionName\030\003 \002(\t\"\240\001\n\027ZoneEncryptionInfo" + "Proto\022,\n\005suite\030\001 
\002(\0162\035.hadoop.hdfs.Ciphe" + "rSuiteProto\022F\n\025cryptoProtocolVersion\030\002 \002" + "(\0162\'.hadoop.hdfs.CryptoProtocolVersionPr" + "oto\022\017\n\007keyName\030\003 \002(\t\"}\n\021CipherOptionProt" + "o\022,\n\005suite\030\001 \002(\0162\035.hadoop.hdfs.CipherSui" + "teProto\022\r\n\005inKey\030\002 \001(\014\022\014\n\004inIv\030\003 \001(\014\022\016\n\006", "outKey\030\004 \001(\014\022\r\n\005outIv\030\005 \001(\014\"\205\002\n\022LocatedB" + "locksProto\022\022\n\nfileLength\030\001 \002(\004\022.\n\006blocks" + "\030\002 \003(\0132\036.hadoop.hdfs.LocatedBlockProto\022\031" + "\n\021underConstruction\030\003 \002(\010\0221\n\tlastBlock\030\004" + " \001(\0132\036.hadoop.hdfs.LocatedBlockProto\022\033\n\023" + "isLastBlockComplete\030\005 \002(\010\022@\n\022fileEncrypt" + "ionInfo\030\006 \001(\0132$.hadoop.hdfs.FileEncrypti" + "onInfoProto\"\250\004\n\023HdfsFileStatusProto\022;\n\010f" + "ileType\030\001 \002(\0162).hadoop.hdfs.HdfsFileStat" + "usProto.FileType\022\014\n\004path\030\002 \002(\014\022\016\n\006length", "\030\003 \002(\004\0222\n\npermission\030\004 \002(\0132\036.hadoop.hdfs" + ".FsPermissionProto\022\r\n\005owner\030\005 \002(\t\022\r\n\005gro" + "up\030\006 \002(\t\022\031\n\021modification_time\030\007 \002(\004\022\023\n\013a" + "ccess_time\030\010 \002(\004\022\017\n\007symlink\030\t \001(\014\022\034\n\021blo" + "ck_replication\030\n \001(\r:\0010\022\024\n\tblocksize\030\013 \001" + "(\004:\0010\0222\n\tlocations\030\014 \001(\0132\037.hadoop.hdfs.L" + "ocatedBlocksProto\022\021\n\006fileId\030\r \001(\004:\0010\022\027\n\013" + "childrenNum\030\016 \001(\005:\002-1\022@\n\022fileEncryptionI" + "nfo\030\017 \001(\0132$.hadoop.hdfs.FileEncryptionIn" + "foProto\022\030\n\rstoragePolicy\030\020 \001(\r:\0010\"3\n\010Fil", "eType\022\n\n\006IS_DIR\020\001\022\013\n\007IS_FILE\020\002\022\016\n\nIS_SYM" + "LINK\020\003\"\216\002\n\025FsServerDefaultsProto\022\021\n\tbloc" + "kSize\030\001 \002(\004\022\030\n\020bytesPerChecksum\030\002 \002(\r\022\027\n" + "\017writePacketSize\030\003 \002(\r\022\023\n\013replication\030\004 " + "\002(\r\022\026\n\016fileBufferSize\030\005 \002(\r\022\"\n\023encryptDa" + "taTransfer\030\006 \001(\010:\005false\022\030\n\rtrashInterval" + "\030\007 \001(\004:\0010\022D\n\014checksumType\030\010 \001(\0162\036.hadoop" + ".hdfs.ChecksumTypeProto:\016CHECKSUM_CRC32\"" + "k\n\025DirectoryListingProto\0228\n\016partialListi" + "ng\030\001 \003(\0132 .hadoop.hdfs.HdfsFileStatusPro", "to\022\030\n\020remainingEntries\030\002 \002(\r\"\242\001\n!Snapsho" + "ttableDirectoryStatusProto\0223\n\tdirStatus\030" + "\001 \002(\0132 .hadoop.hdfs.HdfsFileStatusProto\022" + "\026\n\016snapshot_quota\030\002 \002(\r\022\027\n\017snapshot_numb" + "er\030\003 \002(\r\022\027\n\017parent_fullpath\030\004 \002(\014\"u\n\"Sna" + "pshottableDirectoryListingProto\022O\n\027snaps" + "hottableDirListing\030\001 \003(\0132..hadoop.hdfs.S" + "napshottableDirectoryStatusProto\"_\n\034Snap" + "shotDiffReportEntryProto\022\020\n\010fullpath\030\001 \002" + "(\014\022\031\n\021modificationLabel\030\002 \002(\t\022\022\n\ntargetP", "ath\030\003 \001(\014\"\237\001\n\027SnapshotDiffReportProto\022\024\n" + "\014snapshotRoot\030\001 \002(\t\022\024\n\014fromSnapshot\030\002 \002(" + "\t\022\022\n\ntoSnapshot\030\003 \002(\t\022D\n\021diffReportEntri" + "es\030\004 \003(\0132).hadoop.hdfs.SnapshotDiffRepor" + "tEntryProto\"_\n\020StorageInfoProto\022\025\n\rlayou" + "tVersion\030\001 \002(\r\022\022\n\nnamespceID\030\002 \002(\r\022\021\n\tcl" + 
"usterID\030\003 \002(\t\022\r\n\005cTime\030\004 \002(\004\"\211\002\n\031Namenod" + "eRegistrationProto\022\022\n\nrpcAddress\030\001 \002(\t\022\023" + "\n\013httpAddress\030\002 \002(\t\0222\n\013storageInfo\030\003 \002(\013" + "2\035.hadoop.hdfs.StorageInfoProto\022P\n\004role\030", "\004 \001(\01628.hadoop.hdfs.NamenodeRegistration" + "Proto.NamenodeRoleProto:\010NAMENODE\"=\n\021Nam" + "enodeRoleProto\022\014\n\010NAMENODE\020\001\022\n\n\006BACKUP\020\002" + "\022\016\n\nCHECKPOINT\020\003\"\235\001\n\030CheckpointSignature" + "Proto\022\023\n\013blockPoolId\030\001 \002(\t\022 \n\030mostRecent" + "CheckpointTxId\030\002 \002(\004\022\026\n\016curSegmentTxId\030\003" + " \002(\004\0222\n\013storageInfo\030\004 \002(\0132\035.hadoop.hdfs." + "StorageInfoProto\"\314\001\n\024NamenodeCommandProt" + "o\022\016\n\006action\030\001 \002(\r\0224\n\004type\030\002 \002(\0162&.hadoop" + ".hdfs.NamenodeCommandProto.Type\022:\n\rcheck", "pointCmd\030\003 \001(\0132#.hadoop.hdfs.CheckpointC" + "ommandProto\"2\n\004Type\022\023\n\017NamenodeCommand\020\000" + "\022\025\n\021CheckPointCommand\020\001\"m\n\026CheckpointCom" + "mandProto\0228\n\tsignature\030\001 \002(\0132%.hadoop.hd" + "fs.CheckpointSignatureProto\022\031\n\021needToRet" + "urnImage\030\002 \002(\010\"D\n\nBlockProto\022\017\n\007blockId\030" + "\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\023\n\010numBytes\030\003 \001(" + "\004:\0010\"\243\001\n\027BlockWithLocationsProto\022&\n\005bloc" + "k\030\001 \002(\0132\027.hadoop.hdfs.BlockProto\022\025\n\rdata" + "nodeUuids\030\002 \003(\t\022\024\n\014storageUuids\030\003 \003(\t\0223\n", "\014storageTypes\030\004 \003(\0162\035.hadoop.hdfs.Storag" + "eTypeProto\"P\n\030BlocksWithLocationsProto\0224" + "\n\006blocks\030\001 \003(\0132$.hadoop.hdfs.BlockWithLo" + "cationsProto\"U\n\022RemoteEditLogProto\022\021\n\tst" + "artTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\033\n\014isInPr" + "ogress\030\003 \001(\010:\005false\"K\n\032RemoteEditLogMani" + "festProto\022-\n\004logs\030\001 \003(\0132\037.hadoop.hdfs.Re" + "moteEditLogProto\"\265\001\n\022NamespaceInfoProto\022" + "\024\n\014buildVersion\030\001 \002(\t\022\016\n\006unused\030\002 \002(\r\022\023\n" + "\013blockPoolID\030\003 \002(\t\0222\n\013storageInfo\030\004 \002(\0132", "\035.hadoop.hdfs.StorageInfoProto\022\027\n\017softwa" + "reVersion\030\005 \002(\t\022\027\n\014capabilities\030\006 \001(\004:\0010" + "\"D\n\rBlockKeyProto\022\r\n\005keyId\030\001 \002(\r\022\022\n\nexpi" + "ryDate\030\002 \002(\004\022\020\n\010keyBytes\030\003 \001(\014\"\304\001\n\026Expor" + "tedBlockKeysProto\022\033\n\023isBlockTokenEnabled" + "\030\001 \002(\010\022\031\n\021keyUpdateInterval\030\002 \002(\004\022\025\n\rtok" + "enLifeTime\030\003 \002(\004\022.\n\ncurrentKey\030\004 \002(\0132\032.h" + "adoop.hdfs.BlockKeyProto\022+\n\007allKeys\030\005 \003(" + "\0132\032.hadoop.hdfs.BlockKeyProto\"\212\001\n\024Recove" + "ringBlockProto\022\023\n\013newGenStamp\030\001 \002(\004\022-\n\005b", "lock\030\002 \002(\0132\036.hadoop.hdfs.LocatedBlockPro" + "to\022.\n\rtruncateBlock\030\003 \001(\0132\027.hadoop.hdfs." + "BlockProto\"\025\n\023VersionRequestProto\"E\n\024Ver" + "sionResponseProto\022-\n\004info\030\001 \002(\0132\037.hadoop" + ".hdfs.NamespaceInfoProto\"\245\001\n\021SnapshotInf" + "oProto\022\024\n\014snapshotName\030\001 \002(\t\022\024\n\014snapshot" + "Root\030\002 \002(\t\0222\n\npermission\030\003 \002(\0132\036.hadoop." 
+ "hdfs.FsPermissionProto\022\r\n\005owner\030\004 \002(\t\022\r\n" + "\005group\030\005 \002(\t\022\022\n\ncreateTime\030\006 \002(\t\"J\n\031Roll" + "ingUpgradeStatusProto\022\023\n\013blockPoolId\030\001 \002", "(\t\022\030\n\tfinalized\030\002 \001(\010:\005false*@\n\020StorageT" + "ypeProto\022\010\n\004DISK\020\001\022\007\n\003SSD\020\002\022\013\n\007ARCHIVE\020\003" + "\022\014\n\010RAM_DISK\020\004*6\n\020CipherSuiteProto\022\013\n\007UN" + "KNOWN\020\001\022\025\n\021AES_CTR_NOPADDING\020\002*P\n\032Crypto" + "ProtocolVersionProto\022\034\n\030UNKNOWN_PROTOCOL" + "_VERSION\020\001\022\024\n\020ENCRYPTION_ZONES\020\002*O\n\021Chec" + "ksumTypeProto\022\021\n\rCHECKSUM_NULL\020\000\022\022\n\016CHEC" + "KSUM_CRC32\020\001\022\023\n\017CHECKSUM_CRC32C\020\002*L\n\021Rep" + "licaStateProto\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007" + "\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.a", "pache.hadoop.hdfs.protocol.protoB\nHdfsPr" + "otos\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor, new java.lang.String[] { "PoolId", "BlockId", "GenerationStamp", "NumBytes", }); internal_static_hadoop_hdfs_DatanodeIDProto_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeIDProto_descriptor, new java.lang.String[] { "IpAddr", "HostName", "DatanodeUuid", "XferPort", "InfoPort", "IpcPort", "InfoSecurePort", }); internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor, new java.lang.String[] { "SoftwareVersion", "ConfigVersion", "Uptime", }); internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor, new java.lang.String[] { "Datanodes", }); internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor, new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "AdminState", "CacheCapacity", "CacheUsed", "LastUpdateMonotonic", }); internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor, new java.lang.String[] { "StorageUuid", "State", "StorageType", }); internal_static_hadoop_hdfs_StorageReportProto_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_StorageReportProto_descriptor, new java.lang.String[] { "StorageUuid", "Failed", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "Storage", }); internal_static_hadoop_hdfs_ContentSummaryProto_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ContentSummaryProto_descriptor, new java.lang.String[] { "Length", "FileCount", "DirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", }); internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor, new java.lang.String[] { "TypeQuotaInfo", }); internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor = getDescriptor().getMessageTypes().get(9); internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor, new java.lang.String[] { "Type", "Quota", "Consumed", }); internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor, new java.lang.String[] { "Files", "Cookie", }); internal_static_hadoop_hdfs_FsPermissionProto_descriptor = getDescriptor().getMessageTypes().get(11); internal_static_hadoop_hdfs_FsPermissionProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_FsPermissionProto_descriptor, new java.lang.String[] { "Perm", }); internal_static_hadoop_hdfs_StorageTypesProto_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_StorageTypesProto_descriptor, new java.lang.String[] { "StorageTypes", }); internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor, new java.lang.String[] { "PolicyId", "Name", "CreationPolicy", "CreationFallbackPolicy", "ReplicationFallbackPolicy", }); internal_static_hadoop_hdfs_StorageUuidsProto_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_StorageUuidsProto_descriptor, new java.lang.String[] { "StorageUuids", }); internal_static_hadoop_hdfs_LocatedBlockProto_descriptor = 
getDescriptor().getMessageTypes().get(15); internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_LocatedBlockProto_descriptor, new java.lang.String[] { "B", "Offset", "Locs", "Corrupt", "BlockToken", "IsCached", "StorageTypes", "StorageIDs", }); internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor = getDescriptor().getMessageTypes().get(16); internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor, new java.lang.String[] { "KeyId", "BlockPoolId", "Nonce", "EncryptionKey", "ExpiryDate", "EncryptionAlgorithm", }); internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(17); internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor, new java.lang.String[] { "Suite", "CryptoProtocolVersion", "Key", "Iv", "KeyName", "EzKeyVersionName", }); internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor, new java.lang.String[] { "Key", "Iv", "EzKeyVersionName", }); internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor = getDescriptor().getMessageTypes().get(19); internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor, new java.lang.String[] { "Suite", "CryptoProtocolVersion", "KeyName", }); internal_static_hadoop_hdfs_CipherOptionProto_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CipherOptionProto_descriptor, new java.lang.String[] { "Suite", "InKey", "InIv", "OutKey", "OutIv", }); internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor = getDescriptor().getMessageTypes().get(21); internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor, new java.lang.String[] { "FileLength", "Blocks", "UnderConstruction", "LastBlock", "IsLastBlockComplete", "FileEncryptionInfo", }); internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor = getDescriptor().getMessageTypes().get(22); internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor, new java.lang.String[] { "FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "Blocksize", "Locations", "FileId", "ChildrenNum", "FileEncryptionInfo", "StoragePolicy", }); internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor = getDescriptor().getMessageTypes().get(23); internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor, new java.lang.String[] { "BlockSize", "BytesPerChecksum", "WritePacketSize", "Replication", "FileBufferSize", "EncryptDataTransfer", "TrashInterval", "ChecksumType", }); internal_static_hadoop_hdfs_DirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DirectoryListingProto_descriptor, new java.lang.String[] { "PartialListing", "RemainingEntries", }); internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor = getDescriptor().getMessageTypes().get(25); internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor, new java.lang.String[] { "DirStatus", "SnapshotQuota", "SnapshotNumber", "ParentFullpath", }); internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor = getDescriptor().getMessageTypes().get(26); internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor, new java.lang.String[] { "SnapshottableDirListing", }); internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor = getDescriptor().getMessageTypes().get(27); internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor, new java.lang.String[] { "Fullpath", "ModificationLabel", "TargetPath", }); internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor = getDescriptor().getMessageTypes().get(28); internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor, new java.lang.String[] { "SnapshotRoot", "FromSnapshot", "ToSnapshot", "DiffReportEntries", }); internal_static_hadoop_hdfs_StorageInfoProto_descriptor = getDescriptor().getMessageTypes().get(29); internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_StorageInfoProto_descriptor, new java.lang.String[] { "LayoutVersion", "NamespceID", "ClusterID", "CTime", }); internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor = getDescriptor().getMessageTypes().get(30); internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor, new java.lang.String[] { "RpcAddress", "HttpAddress", "StorageInfo", "Role", }); internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor = getDescriptor().getMessageTypes().get(31); internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor, new java.lang.String[] { "BlockPoolId", "MostRecentCheckpointTxId", "CurSegmentTxId", "StorageInfo", }); internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor = getDescriptor().getMessageTypes().get(32); 
internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor, new java.lang.String[] { "Action", "Type", "CheckpointCmd", }); internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor = getDescriptor().getMessageTypes().get(33); internal_static_hadoop_hdfs_CheckpointCommandProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CheckpointCommandProto_descriptor, new java.lang.String[] { "Signature", "NeedToReturnImage", }); internal_static_hadoop_hdfs_BlockProto_descriptor = getDescriptor().getMessageTypes().get(34); internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_BlockProto_descriptor, new java.lang.String[] { "BlockId", "GenStamp", "NumBytes", }); internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor = getDescriptor().getMessageTypes().get(35); internal_static_hadoop_hdfs_BlockWithLocationsProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_BlockWithLocationsProto_descriptor, new java.lang.String[] { "Block", "DatanodeUuids", "StorageUuids", "StorageTypes", }); internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor = getDescriptor().getMessageTypes().get(36); internal_static_hadoop_hdfs_BlocksWithLocationsProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_BlocksWithLocationsProto_descriptor, new java.lang.String[] { "Blocks", }); internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor = getDescriptor().getMessageTypes().get(37); internal_static_hadoop_hdfs_RemoteEditLogProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_RemoteEditLogProto_descriptor, new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", }); internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor = getDescriptor().getMessageTypes().get(38); internal_static_hadoop_hdfs_RemoteEditLogManifestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_RemoteEditLogManifestProto_descriptor, new java.lang.String[] { "Logs", }); internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor = getDescriptor().getMessageTypes().get(39); internal_static_hadoop_hdfs_NamespaceInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_NamespaceInfoProto_descriptor, new java.lang.String[] { "BuildVersion", "Unused", "BlockPoolID", "StorageInfo", "SoftwareVersion", "Capabilities", }); internal_static_hadoop_hdfs_BlockKeyProto_descriptor = getDescriptor().getMessageTypes().get(40); internal_static_hadoop_hdfs_BlockKeyProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_BlockKeyProto_descriptor, new java.lang.String[] { "KeyId", "ExpiryDate", "KeyBytes", }); internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor = getDescriptor().getMessageTypes().get(41); internal_static_hadoop_hdfs_ExportedBlockKeysProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ExportedBlockKeysProto_descriptor, new java.lang.String[] { "IsBlockTokenEnabled", "KeyUpdateInterval", "TokenLifeTime", 
"CurrentKey", "AllKeys", }); internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor = getDescriptor().getMessageTypes().get(42); internal_static_hadoop_hdfs_RecoveringBlockProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_RecoveringBlockProto_descriptor, new java.lang.String[] { "NewGenStamp", "Block", "TruncateBlock", }); internal_static_hadoop_hdfs_VersionRequestProto_descriptor = getDescriptor().getMessageTypes().get(43); internal_static_hadoop_hdfs_VersionRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_VersionRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_VersionResponseProto_descriptor = getDescriptor().getMessageTypes().get(44); internal_static_hadoop_hdfs_VersionResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_VersionResponseProto_descriptor, new java.lang.String[] { "Info", }); internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor = getDescriptor().getMessageTypes().get(45); internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor, new java.lang.String[] { "SnapshotName", "SnapshotRoot", "Permission", "Owner", "Group", "CreateTime", }); internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor = getDescriptor().getMessageTypes().get(46); internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor, new java.lang.String[] { "BlockPoolId", "Finalized", }); return null; } }; com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(), }, assigner); } // @@protoc_insertion_point(outer_class_scope) }

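Because the assigner runs at class-load time, the same metadata is also available reflectively, without compile-time knowledge of the fields. Message types come back in declaration order, matching the getMessageTypes().get(n) indices used in the assigner above. A sketch under those assumptions:

import com.google.protobuf.Descriptors;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

public class DescriptorWalk {
  public static void main(String[] args) {
    // File-level descriptor rebuilt from the embedded descriptorData string.
    Descriptors.FileDescriptor file = HdfsProtos.getDescriptor();

    // Index 0 corresponds to the first message declared in hdfs.proto.
    Descriptors.Descriptor extendedBlock = file.getMessageTypes().get(0);
    System.out.println(extendedBlock.getFullName()); // hadoop.hdfs.ExtendedBlockProto

    // Enumerate fields with their tag numbers, as declared in hdfs.proto.
    for (Descriptors.FieldDescriptor fd : extendedBlock.getFields()) {
      System.out.println(fd.getNumber() + ": " + fd.getName());
    }

    // Descriptors can also be looked up by simple name rather than index.
    Descriptors.Descriptor byName = file.findMessageTypeByName("DatanodeIDProto");
    System.out.println(byName.getFields().size() + " fields");
  }
}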

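The rebuilt FileDescriptor likewise supports fully generic parsing via com.google.protobuf.DynamicMessage, which needs only a Descriptor rather than a generated class. A hypothetical round-trip using BlockProto, with wire bytes produced by the generated builder standing in for real HDFS traffic:

import com.google.protobuf.Descriptors;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

public class DynamicParse {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    Descriptors.Descriptor type =
        HdfsProtos.getDescriptor().findMessageTypeByName("BlockProto");

    // blockId and genStamp are the required fields of BlockProto.
    byte[] wire = HdfsProtos.BlockProto.newBuilder()
        .setBlockId(42L)
        .setGenStamp(7L)
        .build()
        .toByteArray();

    // Parse without touching the generated BlockProto class at all.
    DynamicMessage msg = DynamicMessage.parseFrom(type, wire);
    System.out.println(msg.getField(type.findFieldByName("blockId"))); // 42
  }
}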

